Merge branch 'perf/uncore-json-updates-1' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-misc into perf/core
Pull perf/core improvements from Andi Kleen:
This pull request contains updates to the Intel PMU events JSON files,
plus two one-liner code fixes for the JSON files (also appended as patches).
The most notable change is the addition of client uncore event lists for
Sandy Bridge through Skylake.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
index 30c5469..07dbb35 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
@@ -45,7 +45,7 @@
- 1 15 SATA
- 1 16 SATA USB
- 1 17 Main
- - 1 18 SD/MMC
+ - 1 18 SD/MMC/GOP
- 1 21 Slow IO (SPI, NOR, BootROM, I2C, UART)
- 1 22 USB3H0
- 1 23 USB3H1
@@ -65,7 +65,7 @@
"cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
- "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+ "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
Example:
@@ -78,6 +78,6 @@
gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
- "cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+ "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index a782659..ca5204b 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
@@ -4,7 +4,6 @@
- compatible: value should be one of the following
"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
- "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 18645e0..5837402c 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -11,7 +11,6 @@
"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
- "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
"samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
"samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea9c1c9..520d61d 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -13,7 +13,7 @@
- "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
before RK3288
- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
- - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
+ - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
deleted file mode 100644
index e68ae5d..0000000
--- a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Broadcom USB3 phy binding for northstar plus SoC
-The USB3 phy is internal to the SoC and is accessed using mdio interface.
-
-Required mdio bus properties:
-- reg: Should be 0x0 for SoC internal USB3 phy
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Required USB3 PHY properties:
-- compatible: should be "brcm,nsp-usb3-phy"
-- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
-- usb3-ctrl-syscon: handler of syscon node defining physical address
- of usb3 control register.
-- #phy-cells: must be 0
-
-Required usb3 control properties:
-- compatible: should be "brcm,nsp-usb3-ctrl"
-- reg: offset and length of the control registers
-
-Example:
-
- mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- usb3_phy: usb-phy@10 {
- compatible = "brcm,nsp-usb3-phy";
- reg = <0x10>;
- usb3-ctrl-syscon = <&usb3_ctrl>;
- #phy-cells = <0>;
- status = "disabled";
- };
- };
-
- usb3_ctrl: syscon@104408 {
- compatible = "brcm,nsp-usb3-ctrl", "syscon";
- reg = <0x104408 0x3fc>;
- };
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
index af0b366..8155dbc 100644
--- a/Documentation/extcon/intel-int3496.txt
+++ b/Documentation/extcon/intel-int3496.txt
@@ -20,3 +20,8 @@
Index 2: The output gpio for muxing of the data pins between the USB host and
the USB peripheral controller, write 1 to mux to the peripheral
controller
+
+There is a mapping between indices and GPIO connection IDs as follows
+ id index 0
+ vbus index 1
+ mux index 2
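
For illustration only (not part of this patch): the index-to-connection-ID
table above is the kind of mapping a board file can hand to the kernel's
ACPI GPIO lookup code via struct acpi_gpio_params / struct acpi_gpio_mapping.
The variable names and exact driver wiring below are assumptions, not taken
from the driver:

#include <linux/acpi.h>

/*
 * Hedged sketch: each acpi_gpio_params entry is
 * { _CRS GpioIo/GpioInt resource index, line index within that
 *   resource, active_low }.  Names here are illustrative.
 */
static const struct acpi_gpio_params id_gpio   = { 0, 0, false }; /* index 0 */
static const struct acpi_gpio_params vbus_gpio = { 1, 0, false }; /* index 1 */
static const struct acpi_gpio_params mux_gpio  = { 2, 0, false }; /* index 2 */

static const struct acpi_gpio_mapping int3496_acpi_gpios[] = {
	{ "id-gpios",   &id_gpio,   1 },
	{ "vbus-gpios", &vbus_gpio, 1 },
	{ "mux-gpios",  &mux_gpio,  1 },
	{ }
};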
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
index 891c694..433eaef 100644
--- a/Documentation/gcc-plugins.txt
+++ b/Documentation/gcc-plugins.txt
@@ -18,8 +18,8 @@
gcc-4.7 can be compiled by a C or a C++ compiler,
and versions 4.8+ can only be compiled by a C++ compiler.
-Currently the GCC plugin infrastructure supports only the x86, arm and arm64
-architectures.
+Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
+powerpc architectures.
This infrastructure was ported from grsecurity [6] and PaX [7].
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3c248f7..fd10689 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3377,6 +3377,69 @@
__u32 pad;
};
+4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: system ioctl
+Parameters: u64 mce_cap (out)
+Returns: 0 on success, -1 on error
+
+Returns supported MCE capabilities. The u64 mce_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register. Supported
+capabilities will have the corresponding bits set.
+
+4.105 KVM_X86_SETUP_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: u64 mcg_cap (in)
+Returns: 0 on success,
+ -EFAULT if u64 mcg_cap cannot be read,
+ -EINVAL if the requested number of banks is invalid,
+ -EINVAL if requested MCE capability is not supported.
+
+Initializes MCE support for use. The u64 mcg_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register and
+specifies which capabilities should be enabled. The maximum
+supported number of error-reporting banks can be retrieved when
+checking for KVM_CAP_MCE. The supported capabilities can be
+retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
+
+4.106 KVM_X86_SET_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_x86_mce (in)
+Returns: 0 on success,
+ -EFAULT if struct kvm_x86_mce cannot be read,
+ -EINVAL if the bank number is invalid,
+ -EINVAL if VAL bit is not set in status field.
+
+Inject a machine check error (MCE) into the guest. The input
+parameter is:
+
+struct kvm_x86_mce {
+ __u64 status;
+ __u64 addr;
+ __u64 misc;
+ __u64 mcg_status;
+ __u8 bank;
+ __u8 pad1[7];
+ __u64 pad2[3];
+};
+
+If the MCE being reported is an uncorrected error, KVM will
+inject it as an MCE exception into the guest. If the guest
+MCG_STATUS register reports that an MCE is in progress, KVM
+causes a KVM_EXIT_SHUTDOWN vmexit.
+
+Otherwise, if the MCE is a corrected error, KVM will just
+store it in the corresponding bank (provided this bank is
+not holding a previously reported uncorrected error).
+
5. The kvm_run structure
------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f..1b0a87f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3216,7 +3216,6 @@
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
-M: Sujith Sankar <ssujith@cisco.com>
M: Govindarajulu Varadarajan <_govind@gmx.com>
M: Neel Patel <neepatel@cisco.com>
S: Supported
@@ -4776,6 +4775,12 @@
S: Maintained
F: drivers/edac/mpc85xx_edac.[ch]
+EDAC-PND2
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/pnd2_edac.[ch]
+
EDAC-PASEMI
M: Egor Martovetsky <egor@pasemi.com>
L: linux-edac@vger.kernel.org
@@ -7774,13 +7779,6 @@
F: net/mac80211/
F: drivers/net/wireless/mac80211_hwsim.[ch]
-MACVLAN DRIVER
-M: Patrick McHardy <kaber@trash.net>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/macvlan.c
-F: include/linux/if_macvlan.h
-
MAILBOX API
M: Jassi Brar <jassisinghbrar@gmail.com>
L: linux-kernel@vger.kernel.org
@@ -7853,6 +7851,8 @@
MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <akarwar@marvell.com>
M: Nishant Sarmukadam <nishants@marvell.com>
+M: Ganapathi Bhat <gbhat@marvell.com>
+M: Xinming Hu <huxm@marvell.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/marvell/mwifiex/
@@ -13383,14 +13383,6 @@
S: Maintained
F: drivers/media/platform/vivid/*
-VLAN (802.1Q)
-M: Patrick McHardy <kaber@trash.net>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/macvlan.c
-F: include/linux/if_*vlan.h
-F: net/8021q/
-
VLYNQ BUS
M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
diff --git a/Makefile b/Makefile
index b841fb3..231e6a7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Fearless Coyote
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981ea..1ec8e0d 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -63,14 +63,14 @@
label = "home";
linux,code = <KEY_HOME>;
gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
- gpio-key,wakeup;
+ wakeup-source;
};
button@1 {
label = "menu";
linux,code = <KEY_MENU>;
gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
- gpio-key,wakeup;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c5..e5ac1d8 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -315,6 +315,13 @@
/* ID & VBUS GPIOs provided in board dts */
};
};
+
+ tpic2810: tpic2810@60 {
+ compatible = "ti,tpic2810";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
};
&mcspi3 {
@@ -330,13 +337,6 @@
spi-max-frequency = <1000000>;
spi-cpol;
};
-
- tpic2810: tpic2810@60 {
- compatible = "ti,tpic2810";
- reg = <0x60>;
- gpio-controller;
- #gpio-cells = <2>;
- };
};
&uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089..00de62d 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
timer@20200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x20200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
local-timer@20600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x20600 0x100>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd9230..ae31a58 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,15 +48,14 @@
};
memory {
- reg = <0x00000000 0x10000000>;
+ reg = <0x80000000 0x10000000>;
};
};
&uart0 {
- clock-frequency = <62499840>;
+ status = "okay";
};
&uart1 {
- clock-frequency = <62499840>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40..df05e7f 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd5422..4a3ab19 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd3..81f7843 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c486..c88b8fe 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d..d503fa0 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb585..cc0363b 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 16666324..74e15a3 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466f..dcfc975 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -121,11 +121,6 @@
};
};
-&cpu0 {
- arm-supply = <&sw1a_reg>;
- soc-supply = <&sw1c_reg>;
-};
-
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be..528b4e9 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
};
usb1: ohci@00400000 {
- compatible = "atmel,sama5d2-ohci", "usb-ohci";
+ compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00400000 0x100000>;
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c47..162e1eb 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/mfd/dbx500-prcmu.h>
#include <dt-bindings/arm/ux500_pm_domains.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
#include "skeleton.dtsi"
/ {
@@ -603,6 +604,11 @@
interrupt-controller;
#interrupt-cells = <2>;
+ ab8500_clock: clock-controller {
+ compatible = "stericsson,ab8500-clk";
+ #clock-cells = <1>;
+ };
+
ab8500_gpio: ab8500-gpio {
compatible = "stericsson,ab8500-gpio";
gpio-controller;
@@ -686,6 +692,8 @@
ab8500-pwm {
compatible = "stericsson,ab8500-pwm";
+ clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+ clock-names = "intclk";
};
ab8500-debugfs {
@@ -700,6 +708,9 @@
V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
+ clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+ clock-names = "audioclk";
+
stericsson,earpeice-cmv = <950>; /* Units in mV. */
};
@@ -1095,6 +1106,14 @@
status = "disabled";
};
+ sound {
+ compatible = "stericsson,snd-soc-mop500";
+ stericsson,cpu-dai = <&msp1 &msp3>;
+ stericsson,audio-codec = <&codec>;
+ clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+ clock-names = "sysclk", "ulpclk", "intclk";
+ };
+
msp0: msp@80123000 {
compatible = "stericsson,ux500-msp-i2s";
reg = <0x80123000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e1..9e359e4 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -186,15 +186,6 @@
status = "okay";
};
- sound {
- compatible = "stericsson,snd-soc-mop500";
-
- stericsson,cpu-dai = <&msp1 &msp3>;
- stericsson,audio-codec = <&codec>;
- clocks = <&prcmu_clk PRCMU_SYSCLK>;
- clock-names = "sysclk";
- };
-
msp0: msp@80123000 {
pinctrl-names = "default";
pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514d..ade1d0d 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -159,15 +159,6 @@
"", "", "", "", "", "", "", "";
};
- sound {
- compatible = "stericsson,snd-soc-mop500";
-
- stericsson,cpu-dai = <&msp1 &msp3>;
- stericsson,audio-codec = <&codec>;
- clocks = <&prcmu_clk PRCMU_SYSCLK>;
- clock-names = "sysclk";
- };
-
msp0: msp@80123000 {
pinctrl-names = "default";
pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5..bbf1c8c 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
reg = <8>;
label = "cpu";
ethernet = <&gmac>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-txid";
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0..8a3ed21 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -495,7 +495,7 @@
resets = <&ccu RST_BUS_GPU>;
assigned-clocks = <&ccu CLK_GPU>;
- assigned-clock-rates = <408000000>;
+ assigned-clock-rates = <384000000>;
};
gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 7097c18..d6bd158 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -50,8 +50,6 @@
backlight: backlight {
compatible = "pwm-backlight";
- pinctrl-names = "default";
- pinctrl-0 = <&bl_en_pin>;
pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
@@ -93,11 +91,6 @@
};
&pio {
- bl_en_pin: bl_en_pin@0 {
- pins = "PH6";
- function = "gpio_in";
- };
-
mmc0_cd_pin: mmc0_cd_pin@0 {
pins = "PB4";
function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6..decd388 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@
CONFIG_WL18XX=m
CONFIG_WLCORE_SPI=m
CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b79..a277981 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
}
+static void sama5d3_ddr_standby(void)
+{
+ u32 lpr0;
+ u32 saved_lpr0;
+
+ saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+ lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+ lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+ cpu_do_idle();
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
* remember.
*/
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
- { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+ { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
{ /*sentinel*/ }
};
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b..c89757a 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -241,6 +241,3 @@
onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
obj-y += $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o
-obj-y += $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644
index f6ac027..0000000
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define NAND_IO_SIZE 4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
- /* platforms which support all ECC schemes */
- if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
- soc_is_omap54xx() || soc_is_dra7xx())
- return 1;
-
- if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
- ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
- if (cpu_is_omap24xx())
- return 0;
- else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
- return 0;
- else
- return 1;
- }
-
- /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
- * which require H/W based ECC error detection */
- if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
- ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
- (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
- return 0;
-
- /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
- if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
- ecc_opt == OMAP_ECC_HAM1_CODE_SW)
- return 1;
- else
- return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
- struct gpmc_settings *s)
-{
- /* Enable RD PIN Monitoring Reg */
- if (gpmc_nand_data->dev_ready) {
- s->wait_on_read = true;
- s->wait_on_write = true;
- }
-
- if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
- s->device_width = GPMC_DEVWIDTH_16BIT;
- else
- s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
- struct gpmc_timings *gpmc_t)
-{
- int err = 0;
- struct gpmc_settings s;
- struct platform_device *pdev;
- struct resource gpmc_nand_res[] = {
- { .flags = IORESOURCE_MEM, },
- { .flags = IORESOURCE_IRQ, },
- { .flags = IORESOURCE_IRQ, },
- };
-
- BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
- err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
- (unsigned long *)&gpmc_nand_res[0].start);
- if (err < 0) {
- pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
- gpmc_nand_data->cs, err);
- return err;
- }
- gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
- gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
- gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
- memset(&s, 0, sizeof(struct gpmc_settings));
- gpmc_set_legacy(gpmc_nand_data, &s);
-
- s.device_nand = true;
-
- if (gpmc_t) {
- err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
- if (err < 0) {
- pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
- err);
- return err;
- }
- }
-
- err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
- if (err < 0)
- goto out_free_cs;
-
- err = gpmc_configure(GPMC_CONFIG_WP, 0);
- if (err < 0)
- goto out_free_cs;
-
- if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
- pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
- err = -EINVAL;
- goto out_free_cs;
- }
-
-
- pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
- if (pdev) {
- err = platform_device_add_resources(pdev, gpmc_nand_res,
- ARRAY_SIZE(gpmc_nand_res));
- if (!err)
- pdev->dev.platform_data = gpmc_nand_data;
- } else {
- err = -ENOMEM;
- }
- if (err)
- goto out_free_pdev;
-
- err = platform_device_add(pdev);
- if (err) {
- dev_err(&pdev->dev, "Unable to register NAND device\n");
- goto out_free_pdev;
- }
-
- return 0;
-
-out_free_pdev:
- platform_device_put(pdev);
-out_free_cs:
- gpmc_cs_free(gpmc_nand_data->cs);
-
- return err;
-}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c70..2944af8 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
return ret;
}
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
{
int err;
struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
if (err < 0) {
dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
gpmc_onenand_data->cs, err);
- return;
+ return err;
}
gpmc_onenand_resource.end = gpmc_onenand_resource.start +
ONENAND_IO_SIZE - 1;
- if (platform_device_register(&gpmc_onenand_device) < 0) {
+ err = platform_device_register(&gpmc_onenand_device);
+ if (err) {
dev_err(dev, "Unable to register OneNAND device\n");
gpmc_cs_free(gpmc_onenand_data->cs);
- return;
}
+
+ return err;
}
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2..4c6f14c 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
#include "omap44xx.h"
@@ -66,7 +67,7 @@
cmp r0, r4
bne wait_2
ldr r12, =API_HYP_ENTRY
- adr r0, hyp_boot
+ badr r0, hyp_boot
smc #0
hyp_boot:
b omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917e..1435fee 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
};
/* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+ {
+ .pa_start = OMAP34XX_SR1_BASE,
+ .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { },
+};
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_sr1_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr1_addr_space,
.user = OCP_USER_MPU,
};
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap36xx_sr1_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr1_addr_space,
.user = OCP_USER_MPU,
};
/* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+ {
+ .pa_start = OMAP34XX_SR2_BASE,
+ .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { },
+};
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_sr2_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr2_addr_space,
.user = OCP_USER_MPU,
};
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap36xx_sr2_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr2_addr_space,
.user = OCP_USER_MPU,
};
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
* Return: 0 if device named @dev_name is not likely to be accessible,
* or 1 if it is likely to be accessible.
*/
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
- const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+ const char *dev_name)
{
+ struct device_node *node;
+ bool available;
+
if (!bus)
- return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+ return omap_type() == OMAP2_DEVICE_TYPE_GP;
- if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
- return 1;
+ node = of_get_child_by_name(bus, dev_name);
+ available = of_device_is_available(node);
+ of_node_put(node);
- return 0;
+ return available;
}
int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
r = omap_hwmod_register_links(h_sham);
- if (r < 0)
+ if (r < 0) {
+ of_node_put(bus);
return r;
+ }
}
if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
r = omap_hwmod_register_links(h_aes);
- if (r < 0)
+ if (r < 0) {
+ of_node_put(bus);
return r;
+ }
}
+ of_node_put(bus);
/*
* Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 3c2cb5d..0bb0e9c 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -411,3 +411,4 @@
394 common pkey_mprotect sys_pkey_mprotect
395 common pkey_alloc sys_pkey_alloc
396 common pkey_free sys_pkey_free
+397 common statx sys_statx
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8c7c244..3741859 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1073,6 +1073,10 @@
def_bool y
depends on COMPAT && SYSVIPC
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
endmenu
menu "Power management options"
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203..bcb03fc 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -114,6 +114,7 @@
pcie0: pcie@20020000 {
compatible = "brcm,iproc-pcie";
reg = <0 0x20020000 0 0x1000>;
+ dma-coherent;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
pcie4: pcie@50020000 {
compatible = "brcm,iproc-pcie";
reg = <0 0x50020000 0 0x1000>;
+ dma-coherent;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
pcie8: pcie@60c00000 {
compatible = "brcm,iproc-pcie-paxc";
reg = <0 0x60c00000 0 0x1000>;
+ dma-coherent;
linux,pci-domain = <8>;
bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
<0x61030000 0x100>;
reg-names = "amac_base", "idm_base", "nicpm_base";
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
phy-handle = <&gphy0>;
phy-mode = "rgmii";
status = "disabled";
@@ -213,6 +217,7 @@
reg = <0x612c0000 0x445>; /* PDC FS0 regs */
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -222,6 +227,7 @@
reg = <0x612e0000 0x445>; /* PDC FS1 regs */
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -231,6 +237,7 @@
reg = <0x61300000 0x445>; /* PDC FS2 regs */
interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -240,6 +247,7 @@
reg = <0x61320000 0x445>; /* PDC FS3 regs */
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -644,6 +652,7 @@
sata: ahci@663f2000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x663f2000 0x1000>;
+ dma-coherent;
reg-names = "ahci";
interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -667,6 +676,7 @@
compatible = "brcm,sdhci-iproc-cygnus";
reg = <0x66420000 0x100>;
interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
bus-width = <8>;
clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
status = "disabled";
@@ -676,6 +686,7 @@
compatible = "brcm,sdhci-iproc-cygnus";
reg = <0x66430000 0x100>;
interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
bus-width = <8>;
clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
status = "disabled";
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad..f31c48d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
static inline bool system_uses_ttbr0_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
- !cpus_have_cap(ARM64_HAS_PAN);
+ !cpus_have_const_cap(ARM64_HAS_PAN);
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26..bdbeb06 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 394
+#define __NR_compat_syscalls 398
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef1..c66b51a 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
__SYSCALL(__NR_preadv2, compat_sys_preadv2)
#define __NR_pwritev2 393
__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8a..fd69108 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
}
/**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
* @arg: argument to pass to CPU suspend operations
*
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24e..d7e90d9 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, increase the KASLR offset by the size of the kernel image.
+ * happens, increase the KASLR offset by the size of the kernel image
+ * rounded up by SWAPPER_BLOCK_SIZE.
*/
if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
- offset = (offset + (u64)(_end - _text)) & mask;
+ (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+ u64 kimg_sz = _end - _text;
+ offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+ & mask;
+ }
if (IS_ENABLED(CONFIG_KASAN))
/*
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae..c5c4594 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
return 0;
}
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return NOTIFY_DONE;
-}
-
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e92..687a358 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -162,7 +162,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
+ pfn_to_nid(virt_to_pfn(lm_alias(_text))));
/*
* vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 048bf07..531cb9e 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -25,6 +25,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -60,6 +61,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -383,6 +390,7 @@
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -564,6 +571,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index d4de249..ca91d39 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -26,6 +26,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -58,6 +59,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -362,6 +369,7 @@
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -523,6 +530,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index fc0fd3f..23a3d8a 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -25,6 +25,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -58,6 +59,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -372,6 +379,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@
# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -544,6 +551,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 52e984a..95deb95 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -25,6 +25,7 @@
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -56,6 +57,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -361,6 +368,7 @@
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -515,6 +522,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index aaeed44..afae695 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -26,6 +26,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -58,6 +59,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -363,6 +370,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -525,6 +532,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3bbc9b2..b010734 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -25,6 +25,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -57,6 +58,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -379,6 +386,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -547,6 +554,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8f2c0de..0e41454 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -21,6 +21,7 @@
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -67,6 +68,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -419,6 +426,7 @@
CONFIG_MVME147_NET=y
CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@
# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
@@ -627,6 +634,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c743dd2..b2e687a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -25,6 +25,7 @@
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68030=y
@@ -55,6 +56,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -361,6 +368,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -515,6 +522,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2ccaca8..cbd8ee2 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -25,6 +25,7 @@
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -56,6 +57,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -361,6 +368,7 @@
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -515,6 +522,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5599f3f..1e82cc9 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -26,6 +26,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -56,6 +57,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -369,6 +376,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
@@ -538,6 +545,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 313bf0a..f9e77f5 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -25,6 +25,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3=y
@@ -53,6 +54,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -359,6 +366,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -517,6 +524,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38b6136..3c394fc 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -25,6 +25,7 @@
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3X=y
@@ -53,6 +54,7 @@
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -359,6 +366,7 @@
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -517,6 +524,8 @@
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d..dda58cf 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82..aab1edd 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 379
+#define NR_syscalls 380
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf..25589f5 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
#define __NR_copy_file_range 376
#define __NR_preadv2 377
#define __NR_pwritev2 378
+#define __NR_statx 379
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d6fd6d9..8c9fcfa 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -399,3 +399,4 @@
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
+ .long sys_statx
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 5fcb9ac..f0a5d8b 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return val;
}
-#define xchg(ptr, with) \
- ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with) \
+ ({ \
+ (__typeof__(*(ptr))) __xchg((unsigned long)(with), \
+ (ptr), \
+ sizeof(*(ptr))); \
+ })
#endif /* __ASM_OPENRISC_CMPXCHG_H */
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 140faa1..1311e6b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -211,7 +211,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
- case 8: __get_user_asm2(x, ptr, retval); \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 5c4695d..ee3e604 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -30,6 +30,7 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);
+EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(memset);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 828a291..f8da545 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
}
void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
/*
* When a process does an "exec", machine state like FPU and debug
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c..c7e15cc 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
- flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
- void *cursor = vaddr;
-
- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(cursor);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- flush_kernel_dcache_page(page);
- }
- flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index fb4382c..edfbf9d 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -32,7 +32,8 @@
* that put_user is the same as __put_user, etc.
*/
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size) \
+ ( (uaddr) == (uaddr) )
#define put_user __put_user
#define get_user __get_user
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 6b0741e..667c994 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -362,8 +362,9 @@
#define __NR_copy_file_range (__NR_Linux + 346)
#define __NR_preadv2 (__NR_Linux + 347)
#define __NR_pwritev2 (__NR_Linux + 348)
+#define __NR_statx (__NR_Linux + 349)
-#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls (__NR_statx + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5..c32a090 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index a0ecdb4..c66c943 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
+ case R_PARISC_SECREL32:
+ /* 32-bit section relative address. */
+ *loc = fsel(val, addend);
+ break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
+ case R_PARISC_SECREL32:
+ /* 32-bit section relative address. */
+ *loc = fsel(val, addend);
+ break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if(in_local(me, (void *)(val + addend))) {
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index e282a51..6017a5a 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -39,7 +39,7 @@
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
- * on every box.
+ * on every box.
*/
#include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
/*
* configure:
*
- * Configure the cpu with a given data image. First turn off the counters,
+ * Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
}
/*
- * Open the device and initialize all of its memory. The device is only
+ * Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
* called on the processor that the download should happen
* on.
*/
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t image_size;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
- if (perf_processor_interface == ONYX_INTF)
+ if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
- else if (perf_processor_interface == CUDA_INTF)
+ else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
- else
+ else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
/* First check the machine type is correct for
the requested image */
- if (((perf_processor_interface == CUDA_INTF) &&
- (interface_type != CUDA_INTF)) ||
- ((perf_processor_interface == ONYX_INTF) &&
- (interface_type != ONYX_INTF)))
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
- if (((interface_type == CUDA_INTF) &&
+ if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
- ((interface_type == ONYX_INTF) &&
- (test >= MAX_ONYX_IMAGES)))
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
- if (interface_type == CUDA_INTF)
+ if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
-/*
+/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
- /*
+ /*
* We can only use the lower 32-bits, the upper 32-bits should be 0
- * anyway given this is in the kernel
+ * anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
- onyx_images[TLBMISS][15] &= 0xffffff00;
+ onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[BIG_CPI][15] &= 0xffffff00;
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
- cuda_images[TLBMISS][16] =
+ cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBMISS][17] =
+ cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[TLBHANDMISS][16] =
+ cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBHANDMISS][17] =
+ cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[BIG_CPI][16] =
+ cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[BIG_CPI][17] =
+ cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
/*
* ioctl routine
- * All routines effect the processor that they are executed on. Thus you
+ * All routines effect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
/* copy out the Counters */
- if (copy_to_user((void __user *)arg, raddr,
+ if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
.open = perf_open,
.release = perf_release
};
-
+
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
-
+
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
userbuf[22] = 0;
userbuf[23] = 0;
- /*
+ /*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
} else {
/*
- * Read RDR-15 which contains the counters and sticky bits
+ * Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
- /*
+ /*
* Clear out the counters
*/
perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
-
+
return 0;
}
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
- }
+ }
/* Check for bits an even number of 64 */
if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+ if (!runway) {
+ pr_err("perf_write_image: ioremap failed!\n");
+ return -ENOMEM;
+ }
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
- __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
-
+
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
- return 0;
+ return 0;
}
/*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
- }
+ }
}
printk("perf_rdr_write done\n");
}
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 06f7ca7..b76f503 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -142,6 +142,8 @@ void machine_power_off(void)
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
+
+ for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1d..44aeaa9 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -444,6 +444,7 @@
ENTRY_SAME(copy_file_range)
ENTRY_COMP(preadv2)
ENTRY_COMP(pwritev2)
+ ENTRY_SAME(statx)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4b369d8..1c94708 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
SYSCALL(kexec_file_load)
+SYSCALL(statx)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index eb1acee..9ba11db 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 383
+#define NR_syscalls 384
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 2f26335..b85f142 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -393,5 +393,6 @@
#define __NR_preadv2 380
#define __NR_pwritev2 381
#define __NR_kexec_file_load 382
+#define __NR_statx 383
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 9957287..6fd0821 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@
_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
/*
- * Before entering any idle state, the NVGPRs are saved in the stack
- * and they are restored before switching to the process context. Hence
- * until they are restored, they are free to be used.
+ * Before entering any idle state, the NVGPRs are saved in the stack.
+ * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+ * NVGPRs are restored. If we are here, it is likely that state is lost,
+ * but not guaranteed -- neither the ISA207 nor the ISA300 test to reach
+ * here is the same as the test used to restore NVGPRs: the
+ * PACA_THREAD_IDLE_STATE test for ISA207, the PSSCR test for ISA300,
+ * and the SRR1 test for restoring NVGPRs.
+ *
+ * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+ * guarantee they will always be restored. This might be tightened
+ * with careful reading of specs (particularly for ISA300) but this
+ * is already a slow wakeup path and it's simpler to be safe.
+ */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
+
+ /*
*
* Save SRR1 and LR in NVGPRs as they might be clobbered in
* opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9be9920..c22f207 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
- /* We don't yet have the machinery to do radix as a guest. */
- if (disable_radix || !(mfmsr() & MSR_HV))
+ if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
/*
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 251060c..8b1fe89 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
- mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
+
+ if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+ mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}
void radix_init_pseries(void)
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index b28200d..3641e24 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "perf/amd_iommu: " fmt
+
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
@@ -21,44 +23,42 @@
#define COUNTER_SHIFT 16
-#define _GET_BANK(ev) ((u8)(ev->hw.extra_reg.reg >> 8))
-#define _GET_CNTR(ev) ((u8)(ev->hw.extra_reg.reg))
+/* iommu pmu conf masks */
+#define GET_CSOURCE(x) ((x)->conf & 0xFFULL)
+#define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL)
+#define GET_DOMID(x) (((x)->conf >> 24) & 0xFFFFULL)
+#define GET_PASID(x) (((x)->conf >> 40) & 0xFFFFFULL)
-/* iommu pmu config masks */
-#define _GET_CSOURCE(ev) ((ev->hw.config & 0xFFULL))
-#define _GET_DEVID(ev) ((ev->hw.config >> 8) & 0xFFFFULL)
-#define _GET_PASID(ev) ((ev->hw.config >> 24) & 0xFFFFULL)
-#define _GET_DOMID(ev) ((ev->hw.config >> 40) & 0xFFFFULL)
-#define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config) & 0xFFFFULL)
-#define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL)
-#define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL)
+/* iommu pmu conf1 masks */
+#define GET_DEVID_MASK(x) ((x)->conf1 & 0xFFFFULL)
+#define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL)
+#define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL)
-static struct perf_amd_iommu __perf_iommu;
+#define IOMMU_NAME_SIZE 16
struct perf_amd_iommu {
+ struct list_head list;
struct pmu pmu;
+ struct amd_iommu *iommu;
+ char name[IOMMU_NAME_SIZE];
u8 max_banks;
u8 max_counters;
u64 cntr_assign_mask;
raw_spinlock_t lock;
- const struct attribute_group *attr_groups[4];
};
-#define format_group attr_groups[0]
-#define cpumask_group attr_groups[1]
-#define events_group attr_groups[2]
-#define null_group attr_groups[3]
+static LIST_HEAD(perf_amd_iommu_list);
/*---------------------------------------------
* sysfs format attributes
*---------------------------------------------*/
PMU_FORMAT_ATTR(csource, "config:0-7");
PMU_FORMAT_ATTR(devid, "config:8-23");
-PMU_FORMAT_ATTR(pasid, "config:24-39");
-PMU_FORMAT_ATTR(domid, "config:40-55");
+PMU_FORMAT_ATTR(domid, "config:24-39");
+PMU_FORMAT_ATTR(pasid, "config:40-59");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
-PMU_FORMAT_ATTR(pasid_mask, "config1:16-31");
-PMU_FORMAT_ATTR(domid_mask, "config1:32-47");
+PMU_FORMAT_ATTR(domid_mask, "config1:16-31");
+PMU_FORMAT_ATTR(pasid_mask, "config1:32-51");
static struct attribute *iommu_format_attrs[] = {
&format_attr_csource.attr,
@@ -79,6 +79,10 @@ static struct attribute_group amd_iommu_format_group = {
/*---------------------------------------------
* sysfs events attributes
*---------------------------------------------*/
+static struct attribute_group amd_iommu_events_group = {
+ .name = "events",
+};
+
struct amd_iommu_event_desc {
struct kobj_attribute attr;
const char *event;
@@ -150,30 +154,34 @@ static struct attribute_group amd_iommu_cpumask_group = {
/*---------------------------------------------*/
-static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu)
+static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
+ struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
+ int max_cntrs = piommu->max_counters;
+ int max_banks = piommu->max_banks;
+ u32 shift, bank, cntr;
unsigned long flags;
- int shift, bank, cntr, retval;
- int max_banks = perf_iommu->max_banks;
- int max_cntrs = perf_iommu->max_counters;
+ int retval;
- raw_spin_lock_irqsave(&perf_iommu->lock, flags);
+ raw_spin_lock_irqsave(&piommu->lock, flags);
for (bank = 0, shift = 0; bank < max_banks; bank++) {
for (cntr = 0; cntr < max_cntrs; cntr++) {
shift = bank + (bank*3) + cntr;
- if (perf_iommu->cntr_assign_mask & (1ULL<<shift)) {
+ if (piommu->cntr_assign_mask & BIT_ULL(shift)) {
continue;
} else {
- perf_iommu->cntr_assign_mask |= (1ULL<<shift);
- retval = ((u16)((u16)bank<<8) | (u8)(cntr));
+ piommu->cntr_assign_mask |= BIT_ULL(shift);
+ event->hw.iommu_bank = bank;
+ event->hw.iommu_cntr = cntr;
+ retval = 0;
goto out;
}
}
}
retval = -ENOSPC;
out:
- raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&piommu->lock, flags);
return retval;
}
@@ -202,8 +210,6 @@ static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
static int perf_iommu_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct perf_amd_iommu *perf_iommu;
- u64 config, config1;
/* test the event attr type check for PMU enumeration */
if (event->attr.type != event->pmu->type)
@@ -225,80 +231,62 @@ static int perf_iommu_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EINVAL;
- perf_iommu = &__perf_iommu;
-
- if (event->pmu != &perf_iommu->pmu)
- return -ENOENT;
-
- if (perf_iommu) {
- config = event->attr.config;
- config1 = event->attr.config1;
- } else {
- return -EINVAL;
- }
-
- /* integrate with iommu base devid (0000), assume one iommu */
- perf_iommu->max_banks =
- amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID);
- perf_iommu->max_counters =
- amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID);
- if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0))
- return -EINVAL;
-
/* update the hw_perf_event struct with the iommu config data */
- hwc->config = config;
- hwc->extra_reg.config = config1;
+ hwc->conf = event->attr.config;
+ hwc->conf1 = event->attr.config1;
return 0;
}
+static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
+{
+ return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
+}
+
static void perf_iommu_enable_event(struct perf_event *ev)
{
- u8 csource = _GET_CSOURCE(ev);
- u16 devid = _GET_DEVID(ev);
+ struct amd_iommu *iommu = perf_event_2_iommu(ev);
+ struct hw_perf_event *hwc = &ev->hw;
+ u8 bank = hwc->iommu_bank;
+ u8 cntr = hwc->iommu_cntr;
u64 reg = 0ULL;
- reg = csource;
- amd_iommu_pc_get_set_reg_val(devid,
- _GET_BANK(ev), _GET_CNTR(ev) ,
- IOMMU_PC_COUNTER_SRC_REG, &reg, true);
+ reg = GET_CSOURCE(hwc);
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);
- reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32);
+ reg = GET_DEVID_MASK(hwc);
+ reg = GET_DEVID(hwc) | (reg << 32);
if (reg)
- reg |= (1UL << 31);
- amd_iommu_pc_get_set_reg_val(devid,
- _GET_BANK(ev), _GET_CNTR(ev) ,
- IOMMU_PC_DEVID_MATCH_REG, &reg, true);
+ reg |= BIT(31);
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);
- reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
+ reg = GET_PASID_MASK(hwc);
+ reg = GET_PASID(hwc) | (reg << 32);
if (reg)
- reg |= (1UL << 31);
- amd_iommu_pc_get_set_reg_val(devid,
- _GET_BANK(ev), _GET_CNTR(ev) ,
- IOMMU_PC_PASID_MATCH_REG, &reg, true);
+ reg |= BIT(31);
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);
- reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
+ reg = GET_DOMID_MASK(hwc);
+ reg = GET_DOMID(hwc) | (reg << 32);
if (reg)
- reg |= (1UL << 31);
- amd_iommu_pc_get_set_reg_val(devid,
- _GET_BANK(ev), _GET_CNTR(ev) ,
- IOMMU_PC_DOMID_MATCH_REG, &reg, true);
+ reg |= BIT(31);
+ amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}
static void perf_iommu_disable_event(struct perf_event *event)
{
+ struct amd_iommu *iommu = perf_event_2_iommu(event);
+ struct hw_perf_event *hwc = &event->hw;
u64 reg = 0ULL;
- amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
- _GET_BANK(event), _GET_CNTR(event),
- IOMMU_PC_COUNTER_SRC_REG, ®, true);
+ amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
+ IOMMU_PC_COUNTER_SRC_REG, &reg);
}
static void perf_iommu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
- pr_debug("perf: amd_iommu:perf_iommu_start\n");
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
@@ -306,10 +294,11 @@ static void perf_iommu_start(struct perf_event *event, int flags)
hwc->state = 0;
if (flags & PERF_EF_RELOAD) {
- u64 prev_raw_count = local64_read(&hwc->prev_count);
- amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
- _GET_BANK(event), _GET_CNTR(event),
- IOMMU_PC_COUNTER_REG, &prev_raw_count, true);
+ u64 prev_raw_count = local64_read(&hwc->prev_count);
+ struct amd_iommu *iommu = perf_event_2_iommu(event);
+
+ amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
+ IOMMU_PC_COUNTER_REG, &prev_raw_count);
}
perf_iommu_enable_event(event);
@@ -319,37 +308,30 @@ static void perf_iommu_start(struct perf_event *event, int flags)
static void perf_iommu_read(struct perf_event *event)
{
- u64 count = 0ULL;
- u64 prev_raw_count = 0ULL;
- u64 delta = 0ULL;
+ u64 count, prev, delta;
struct hw_perf_event *hwc = &event->hw;
- pr_debug("perf: amd_iommu:perf_iommu_read\n");
+ struct amd_iommu *iommu = perf_event_2_iommu(event);
- amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
- _GET_BANK(event), _GET_CNTR(event),
- IOMMU_PC_COUNTER_REG, &count, false);
-
- /* IOMMU pc counter register is only 48 bits */
- count &= 0xFFFFFFFFFFFFULL;
-
- prev_raw_count = local64_read(&hwc->prev_count);
- if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- count) != prev_raw_count)
+ if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
+ IOMMU_PC_COUNTER_REG, &count))
return;
- /* Handling 48-bit counter overflowing */
- delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT);
+ /* IOMMU pc counter register is only 48 bits */
+ count &= GENMASK_ULL(47, 0);
+
+ prev = local64_read(&hwc->prev_count);
+ if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
+ return;
+
+ /* Handle 48-bit counter overflow */
+ delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
delta >>= COUNTER_SHIFT;
local64_add(delta, &event->count);
-
}
static void perf_iommu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
- u64 config;
-
- pr_debug("perf: amd_iommu:perf_iommu_stop\n");
if (hwc->state & PERF_HES_UPTODATE)
return;
@@ -361,7 +343,6 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
if (hwc->state & PERF_HES_UPTODATE)
return;
- config = hwc->config;
perf_iommu_read(event);
hwc->state |= PERF_HES_UPTODATE;
}
@@ -369,17 +350,12 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
static int perf_iommu_add(struct perf_event *event, int flags)
{
int retval;
- struct perf_amd_iommu *perf_iommu =
- container_of(event->pmu, struct perf_amd_iommu, pmu);
- pr_debug("perf: amd_iommu:perf_iommu_add\n");
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
/* request an iommu bank/counter */
- retval = get_next_avail_iommu_bnk_cntr(perf_iommu);
- if (retval != -ENOSPC)
- event->hw.extra_reg.reg = (u16)retval;
- else
+ retval = get_next_avail_iommu_bnk_cntr(event);
+ if (retval)
return retval;
if (flags & PERF_EF_START)
@@ -390,115 +366,124 @@ static int perf_iommu_add(struct perf_event *event, int flags)
static void perf_iommu_del(struct perf_event *event, int flags)
{
+ struct hw_perf_event *hwc = &event->hw;
struct perf_amd_iommu *perf_iommu =
container_of(event->pmu, struct perf_amd_iommu, pmu);
- pr_debug("perf: amd_iommu:perf_iommu_del\n");
perf_iommu_stop(event, PERF_EF_UPDATE);
/* clear the assigned iommu bank/counter */
clear_avail_iommu_bnk_cntr(perf_iommu,
- _GET_BANK(event),
- _GET_CNTR(event));
+ hwc->iommu_bank, hwc->iommu_cntr);
perf_event_update_userpage(event);
}
-static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
+static __init int _init_events_attrs(void)
{
- struct attribute **attrs;
- struct attribute_group *attr_group;
int i = 0, j;
+ struct attribute **attrs;
while (amd_iommu_v2_event_descs[i].attr.attr.name)
i++;
- attr_group = kzalloc(sizeof(struct attribute *)
- * (i + 1) + sizeof(*attr_group), GFP_KERNEL);
- if (!attr_group)
+ attrs = kzalloc(sizeof(struct attribute **) * (i + 1), GFP_KERNEL);
+ if (!attrs)
return -ENOMEM;
- attrs = (struct attribute **)(attr_group + 1);
for (j = 0; j < i; j++)
attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;
- attr_group->name = "events";
- attr_group->attrs = attrs;
- perf_iommu->events_group = attr_group;
-
+ amd_iommu_events_group.attrs = attrs;
return 0;
}
-static __init void amd_iommu_pc_exit(void)
-{
- if (__perf_iommu.events_group != NULL) {
- kfree(__perf_iommu.events_group);
- __perf_iommu.events_group = NULL;
- }
-}
+const struct attribute_group *amd_iommu_attr_groups[] = {
+ &amd_iommu_format_group,
+ &amd_iommu_cpumask_group,
+ &amd_iommu_events_group,
+ NULL,
+};
-static __init int _init_perf_amd_iommu(
- struct perf_amd_iommu *perf_iommu, char *name)
+static struct pmu iommu_pmu = {
+ .event_init = perf_iommu_event_init,
+ .add = perf_iommu_add,
+ .del = perf_iommu_del,
+ .start = perf_iommu_start,
+ .stop = perf_iommu_stop,
+ .read = perf_iommu_read,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = amd_iommu_attr_groups,
+};
+
+static __init int init_one_iommu(unsigned int idx)
{
+ struct perf_amd_iommu *perf_iommu;
int ret;
+ perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
+ if (!perf_iommu)
+ return -ENOMEM;
+
raw_spin_lock_init(&perf_iommu->lock);
- /* Init format attributes */
- perf_iommu->format_group = &amd_iommu_format_group;
+ perf_iommu->pmu = iommu_pmu;
+ perf_iommu->iommu = get_amd_iommu(idx);
+ perf_iommu->max_banks = amd_iommu_pc_get_max_banks(idx);
+ perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);
- /* Init cpumask attributes to only core 0 */
- cpumask_set_cpu(0, &iommu_cpumask);
- perf_iommu->cpumask_group = &amd_iommu_cpumask_group;
-
- /* Init events attributes */
- if (_init_events_attrs(perf_iommu) != 0)
- pr_err("perf: amd_iommu: Only support raw events.\n");
-
- /* Init null attributes */
- perf_iommu->null_group = NULL;
- perf_iommu->pmu.attr_groups = perf_iommu->attr_groups;
-
- ret = perf_pmu_register(&perf_iommu->pmu, name, -1);
- if (ret) {
- pr_err("perf: amd_iommu: Failed to initialized.\n");
- amd_iommu_pc_exit();
- } else {
- pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n",
- amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID),
- amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID));
+ if (!perf_iommu->iommu ||
+ !perf_iommu->max_banks ||
+ !perf_iommu->max_counters) {
+ kfree(perf_iommu);
+ return -EINVAL;
}
+ snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);
+
+ ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
+ if (!ret) {
+ pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
+ idx, perf_iommu->max_banks, perf_iommu->max_counters);
+ list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
+ } else {
+ pr_warn("Error initializing IOMMU %d.\n", idx);
+ kfree(perf_iommu);
+ }
return ret;
}
-static struct perf_amd_iommu __perf_iommu = {
- .pmu = {
- .task_ctx_nr = perf_invalid_context,
- .event_init = perf_iommu_event_init,
- .add = perf_iommu_add,
- .del = perf_iommu_del,
- .start = perf_iommu_start,
- .stop = perf_iommu_stop,
- .read = perf_iommu_read,
- },
- .max_banks = 0x00,
- .max_counters = 0x00,
- .cntr_assign_mask = 0ULL,
- .format_group = NULL,
- .cpumask_group = NULL,
- .events_group = NULL,
- .null_group = NULL,
-};
-
static __init int amd_iommu_pc_init(void)
{
+ unsigned int i, cnt = 0;
+ int ret;
+
/* Make sure the IOMMU PC resource is available */
if (!amd_iommu_pc_supported())
return -ENODEV;
- _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
+ ret = _init_events_attrs();
+ if (ret)
+ return ret;
+ /*
+ * An IOMMU PMU is specific to an IOMMU, and can function independently.
+ * So we go through all IOMMUs and ignore the ones that fail init,
+ * unless all IOMMUs fail.
+ */
+ for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
+ ret = init_one_iommu(i);
+ if (!ret)
+ cnt++;
+ }
+
+ if (!cnt) {
+ kfree(amd_iommu_events_group.attrs);
+ return -ENODEV;
+ }
+
+ /* Init cpumask attributes to only core 0 */
+ cpumask_set_cpu(0, &iommu_cpumask);
return 0;
}
diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h
index 845d173..62e0702 100644
--- a/arch/x86/events/amd/iommu.h
+++ b/arch/x86/events/amd/iommu.h
@@ -24,17 +24,23 @@
#define PC_MAX_SPEC_BNKS 64
#define PC_MAX_SPEC_CNTRS 16
-/* iommu pc reg masks*/
-#define IOMMU_BASE_DEVID 0x0000
+struct amd_iommu;
/* amd_iommu_init.c external support functions */
+extern int amd_iommu_get_num_iommus(void);
+
extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
+extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
+extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
-extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr,
- u8 fxn, u64 *value, bool is_write);
+extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value);
+
+extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value);
+
+extern struct amd_iommu *get_amd_iommu(int idx);
#endif /*_PERF_EVENT_AMD_IOMMU_H_*/
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 354e9ff..ae8324d 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -28,6 +28,7 @@
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
+#include <asm/intel-family.h>
#include "../perf_event.h"
#include "pt.h"
@@ -98,6 +99,7 @@ static struct attribute_group pt_cap_group = {
.name = "caps",
};
+PMU_FORMAT_ATTR(pt, "config:0" );
PMU_FORMAT_ATTR(cyc, "config:1" );
PMU_FORMAT_ATTR(pwr_evt, "config:4" );
PMU_FORMAT_ATTR(fup_on_ptw, "config:5" );
@@ -105,11 +107,13 @@ PMU_FORMAT_ATTR(mtc, "config:9" );
PMU_FORMAT_ATTR(tsc, "config:10" );
PMU_FORMAT_ATTR(noretcomp, "config:11" );
PMU_FORMAT_ATTR(ptw, "config:12" );
+PMU_FORMAT_ATTR(branch, "config:13" );
PMU_FORMAT_ATTR(mtc_period, "config:14-17" );
PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" );
PMU_FORMAT_ATTR(psb_period, "config:24-27" );
static struct attribute *pt_formats_attr[] = {
+ &format_attr_pt.attr,
&format_attr_cyc.attr,
&format_attr_pwr_evt.attr,
&format_attr_fup_on_ptw.attr,
@@ -117,6 +121,7 @@ static struct attribute *pt_formats_attr[] = {
&format_attr_tsc.attr,
&format_attr_noretcomp.attr,
&format_attr_ptw.attr,
+ &format_attr_branch.attr,
&format_attr_mtc_period.attr,
&format_attr_cyc_thresh.attr,
&format_attr_psb_period.attr,
@@ -197,6 +202,19 @@ static int __init pt_pmu_hw_init(void)
pt_pmu.tsc_art_den = eax;
}
+ /* model-specific quirks */
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_XEON_D:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_X:
+ /* not setting BRANCH_EN will #GP, erratum BDM106 */
+ pt_pmu.branch_en_always_on = true;
+ break;
+ default:
+ break;
+ }
+
if (boot_cpu_has(X86_FEATURE_VMX)) {
/*
* Intel SDM, 36.5 "Tracing post-VMXON" says that
@@ -263,8 +281,20 @@ static int __init pt_pmu_hw_init(void)
#define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \
RTIT_CTL_FUP_ON_PTW)
-#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | \
+/*
+ * Bit 0 (TraceEn) in the attr.config is meaningless as the
+ * corresponding bit in the RTIT_CTL can only be controlled
+ * by the driver; therefore, repurpose it to mean: pass
+ * through the bit that was previously assumed to be always
+ * on for PT, thereby allowing the user to *not* set it if
+ * they so wish. See also pt_event_valid() and pt_config().
+ */
+#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN
+
+#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN | \
+ RTIT_CTL_TSC_EN | \
RTIT_CTL_DISRETC | \
+ RTIT_CTL_BRANCH_EN | \
RTIT_CTL_CYC_PSB | \
RTIT_CTL_MTC | \
RTIT_CTL_PWR_EVT_EN | \
@@ -332,6 +362,33 @@ static bool pt_event_valid(struct perf_event *event)
return false;
}
+ /*
+ * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
+ * clears the assumption that BranchEn must always be enabled,
+ * as was the case with the first implementation of PT.
+ * If this bit is not set, the legacy behavior is preserved
+ * for compatibility with the older userspace.
+ *
+ * Re-using bit 0 for this purpose is fine because it is never
+ * directly set by the user; previous attempts at setting it in
+ * the attr.config resulted in -EINVAL.
+ */
+ if (config & RTIT_CTL_PASSTHROUGH) {
+ /*
+ * Disallow not setting BRANCH_EN where BRANCH_EN is
+ * always required.
+ */
+ if (pt_pmu.branch_en_always_on &&
+ !(config & RTIT_CTL_BRANCH_EN))
+ return false;
+ } else {
+ /*
+ * Disallow BRANCH_EN without the PASSTHROUGH.
+ */
+ if (config & RTIT_CTL_BRANCH_EN)
+ return false;
+ }
+
return true;
}
@@ -420,7 +477,20 @@ static void pt_config(struct perf_event *event)
}
reg = pt_config_filters(event);
- reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;
+ reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN;
+
+ /*
+ * Previously, we had BRANCH_EN on by default, but now that PT has
+ * grown features outside of branch tracing, it is useful to allow
+ * the user to disable it. Setting bit 0 in the event's attr.config
+ * allows BRANCH_EN to pass through instead of being always on. See
+ * also the comment in pt_event_valid().
+ */
+ if (event->attr.config & BIT(0)) {
+ reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
+ } else {
+ reg |= RTIT_CTL_BRANCH_EN;
+ }
if (!event->attr.exclude_kernel)
reg |= RTIT_CTL_OS;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index b528e8f..0eb41d0 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -110,6 +110,7 @@ struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
bool vmx;
+ bool branch_en_always_on;
unsigned long max_nonturbo_ratio;
unsigned int tsc_art_num;
unsigned int tsc_art_den;
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b..c4eda79 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
};
void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 72277b1..50d35e3 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
*(tmp + 1) = 0;
}
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
- defined(CONFIG_PARAVIRT))
static inline void native_pud_clear(pud_t *pudp)
{
}
-#endif
static inline void pud_clear(pud_t *pudp)
{
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b..585ee0d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ae32838..b2879cc23 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
return -EINVAL;
}
+ if (!enabled) {
+ ++disabled_cpus;
+ return -EINVAL;
+ }
+
if (boot_cpu_physical_apicid != -1U)
ver = boot_cpu_apic_version;
- cpu = __generic_processor_info(id, ver, enabled);
+ cpu = generic_processor_info(id, ver);
if (cpu >= 0)
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aee7ded..8ccb7ef 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
return nr_logical_cpuids++;
}
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
- if (enabled) {
- pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
- "reached. Processor %d/0x%x ignored.\n",
- max, thiscpu, apicid);
- }
+ pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+ "reached. Processor %d/0x%x ignored.\n",
+ max, thiscpu, apicid);
disabled_cpus++;
return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
-
- if (enabled) {
- num_processors++;
- physid_set(apicid, phys_cpu_present_map);
- set_cpu_present(cpu, true);
- } else {
- disabled_cpus++;
- }
+ physid_set(apicid, phys_cpu_present_map);
+ set_cpu_present(cpu, true);
+ num_processors++;
return cpu;
}
-int generic_processor_info(int apicid, int version)
-{
- return __generic_processor_info(apicid, version, true);
-}
-
int hard_smp_processor_id(void)
{
return read_apic_id();
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index c05509d..9ac2a5c 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
kernfs_unbreak_active_protection(kn);
- kernfs_put(kn);
+ kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372..b5785c1 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
+#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index f088ea4..a723ae94 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
spin_lock_irqsave(&desc->lock, flags);
/*
- * most handlers of type NMI_UNKNOWN never return because
- * they just assume the NMI is theirs. Just a sanity check
- * to manage expectations
+ * Indicate if there are multiple registrations on the
+ * internal NMI handler call chains (SERR and IO_CHECK).
*/
- WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4f7a983..c73a7f9 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+ if (boot_cpu_has(X86_FEATURE_ART))
+ art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 478d15d..0833926 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
return sizeof(*regs);
}
+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
static bool is_last_task_frame(struct unwind_state *state)
{
- unsigned long bp = (unsigned long)state->bp;
- unsigned long regs = (unsigned long)task_pt_regs(state->task);
+ unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+ unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
/*
* We have to check for the last task frame at two different locations
* because gcc can occasionally decide to realign the stack pointer and
- * change the offset of the stack frame by a word in the prologue of a
- * function called by head/entry code.
+ * change the offset of the stack frame in the prologue of a function
+ * called by head/entry code. Examples:
+ *
+ * <start_secondary>:
+ * push %edi
+ * lea 0x8(%esp),%edi
+ * and $0xfffffff8,%esp
+ * pushl -0x4(%edi)
+ * push %ebp
+ * mov %esp,%ebp
+ *
+ * <x86_64_start_kernel>:
+ * lea 0x8(%rsp),%r10
+ * and $0xfffffffffffffff0,%rsp
+ * pushq -0x8(%r10)
+ * push %rbp
+ * mov %rsp,%rbp
+ *
+ * Note that after aligning the stack, it pushes a duplicate copy of
+ * the return address before pushing the frame pointer.
*/
- return bp == regs - FRAME_HEADER_SIZE ||
- bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+ return (state->bp == last_bp ||
+ (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
}
/*
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d..047b17a 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
{
struct kvm_pic *vpic = kvm->arch.vpic;
+ if (!vpic)
+ return;
+
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 6e219e5..289270a 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ if (!ioapic)
+ return;
+
cancel_delayed_work_sync(&ioapic->eoi_inject);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
kvm->arch.vioapic = NULL;
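
Note: the NULL checks added to kvm_pic_destroy() and kvm_ioapic_destroy() pair with the later x86.c hunk, where kvm_arch_destroy_vm() stops kfree()ing the raw pointers and instead calls the destroy helpers unconditionally, even for VMs that never created an in-kernel PIC/IOAPIC. A minimal sketch of the guard pattern (demo_dev and the arch field are hypothetical, not KVM's real layout):

	void demo_destroy(struct kvm *kvm)
	{
		struct demo_dev *dev = kvm->arch.demo_dev;	/* hypothetical field */

		if (!dev)		/* this VM never created the device */
			return;

		/* safe to unregister from the I/O bus and free here */
		kfree(dev);
	}
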
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 37942e4..60168cd 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+ cleanup_srcu_struct(&head->track_srcu);
+}
+
void kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
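
Note: the new kvm_page_track_cleanup() balances the init_srcu_struct() done in kvm_page_track_init(); SRCU keeps per-CPU state that is only released by cleanup_srcu_struct(), so skipping it on VM destruction leaks memory. A minimal sketch of the pairing, using a hypothetical structure rather than KVM's real one:

	struct demo_notifier_head {
		struct srcu_struct track_srcu;
		struct hlist_head track_notifier_list;
	};

	static void demo_head_init(struct demo_notifier_head *head)
	{
		init_srcu_struct(&head->track_srcu);
		INIT_HLIST_HEAD(&head->track_notifier_list);
	}

	static void demo_head_cleanup(struct demo_notifier_head *head)
	{
		cleanup_srcu_struct(&head->track_srcu);	/* frees per-CPU SRCU state */
	}
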
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c..5fba706 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
unsigned long flags;
struct kvm_arch *vm_data = &kvm->arch;
+ if (!avic)
+ return;
+
avic_free_vm_id(vm_data->avic_vm_id);
if (vm_data->avic_logical_id_table_page)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 98e82ee..2ee00db 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}
+static inline bool cpu_has_vmx_invvpid(void)
+{
+ return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
* though it is treated as global context. The alternative is
* not failing the single-context invvpid, and it is worse.
*/
- if (enable_vpid)
+ if (enable_vpid) {
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_VPID;
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
VMX_VPID_EXTENT_SUPPORTED_MASK;
- else
+ } else
vmx->nested.nested_vmx_vpid_caps = 0;
if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
}
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+ if (enable_ept)
+ vmx_flush_tlb(vcpu);
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
- if (!cpu_has_vmx_vpid())
+ if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+ !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
+
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs)
@@ -8501,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
else {
- WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+ exit_reason);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -8547,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
} else {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ vmx_flush_tlb_ept_only(vcpu);
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
@@ -8572,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
*/
if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(&vmx->vcpu),
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmcs_write64(APIC_ACCESS_ADDR, hpa);
+ vmx_flush_tlb_ept_only(vcpu);
+ }
}
static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9974,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
- bool nested_ept_enabled = false;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10121,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_intr_status);
}
- nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
/*
* Write an illegal value to APIC_ACCESS_ADDR. Later,
* nested_get_vmcs12_pages will either fix it up or
@@ -10255,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
+ } else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb_ept_only(vcpu);
}
/*
@@ -10282,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmx_set_efer(vcpu, vcpu->arch.efer);
/* Shadow page tables on either EPT or shadow page tables. */
- if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
return 1;
- kvm_mmu_reset_context(vcpu);
-
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
@@ -11056,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu,
vcpu->arch.apic_base & X2APIC_ENABLE);
+ } else if (!nested_cpu_has_ept(vmcs12) &&
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb_ept_only(vcpu);
}
/* This is needed for same reason as it was needed in prepare_vmcs02 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1faf620..ccbd45e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (kvm_x86_ops->vm_destroy)
kvm_x86_ops->vm_destroy(kvm);
kvm_iommu_unmap_guest(kvm);
- kfree(kvm->arch.vpic);
- kfree(kvm->arch.vioapic);
+ kvm_pic_destroy(kvm);
+ kvm_ioapic_destroy(kvm);
kvm_free_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kvm_mmu_uninit_vm(kvm);
+ kvm_page_track_cleanup(kvm);
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
{
struct x86_exception fault;
- trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (work->wakeup_all)
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+ trace_kvm_async_pf_ready(work->arch.token, work->gva);
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8d63d7a..4c90cfd 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 5126dfd..cd44ae7 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
* we might run off the end of the bounds table if we are on
* a 64-bit kernel and try to get 8 bytes.
*/
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
long __user *bd_entry_ptr)
{
u32 bd_entry_32;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index a7dbec4..3dbde04f 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -26,5 +26,6 @@
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
new file mode 100644
index 0000000..a6c3705
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+ .name = "msic_power_btn",
+ .id = PLATFORM_DEVID_NONE,
+ .num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
+ .resource = mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&mrfld_power_btn_dev);
+ return 0;
+ }
+
+ return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+ .notifier_call = mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return -ENODEV;
+
+ /*
+ * We need to be sure that the SCU IPC is ready before
+ * PMIC power button device can be registered:
+ */
+ intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+ return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+ struct resource *res = mrfld_power_btn_resources;
+ struct sfi_device_table_entry *pentry = info;
+
+ res->start = res->end = pentry->irq;
+ return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+ .name = "bcove_power_btn",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .msic = 1,
+ .get_platform_data = &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 86edd1e9..9e304e2 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -19,7 +19,7 @@
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
static struct platform_device wdt_dev = {
.name = "intel_mid_wdt",
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index e793fe5..e42978d 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -17,16 +17,6 @@
#include "intel_mid_weak_decls.h"
-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
- .arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
static unsigned long __init mfld_calibrate_tsc(void)
{
unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
static void __init penwell_arch_setup(void)
{
x86_platform.calibrate_tsc = mfld_calibrate_tsc;
- pm_power_off = mfld_power_off;
}
+static struct intel_mid_ops penwell_ops = {
+ .arch_setup = penwell_arch_setup,
+};
+
void *get_penwell_ops(void)
{
return &penwell_ops;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a4546f0..08a49c6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
{
struct blk_mq_timeout_data *data = priv;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
- /*
- * If a request wasn't started before the queue was
- * marked dying, kill it here or it'll go unnoticed.
- */
- if (unlikely(blk_queue_dying(rq->q))) {
- rq->errors = -EIO;
- blk_mq_end_request(rq, rq->errors);
- }
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
return;
- }
if (time_after_eq(jiffies, rq->deadline)) {
if (!blk_mark_rq_complete(rq))
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 9b43efb..186fcb9 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
+ blk_stat_flush_batch(src);
+
if (!src->nr_samples)
return;
- blk_stat_flush_batch(src);
-
dst->min = min(dst->min, src->min);
dst->max = max(dst->max, src->max);
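
Note: the blk-stat change is purely an ordering fix. Samples may still be sitting in the per-stat batch, so the batch has to be folded into src before src->nr_samples is tested; otherwise a batch-only source looks empty and is dropped from the sum. A simplified sketch (demo_stat and its fields are illustrative only):

	static void demo_stat_sum(struct demo_stat *dst, struct demo_stat *src)
	{
		demo_stat_flush_batch(src);	/* fold batched samples in first */

		if (!src->nr_samples)		/* now a true "no data" check */
			return;

		dst->min = min(dst->min, src->min);
		dst->max = max(dst->max, src->max);
	}
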
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 4467a80..0143135 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
void __weak arch_unregister_cpu(int cpu) {}
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
- return -ENODEV;
-}
-
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
+ if (acpi_duplicate_processor_id(pr->acpi_id)) {
+ dev_err(&device->dev,
+ "Failed to get unique processor _UID (0x%x)\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
pr->acpi_id);
if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
static int nr_unique_ids __initdata;
/* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
};
/* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
[0 ... NR_CPUS - 1] = -1,
};
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
void **rv)
{
acpi_status status;
+ acpi_object_type acpi_type;
+ unsigned long long uid;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ status = acpi_get_type(handle, &acpi_type);
if (ACPI_FAILURE(status))
- acpi_handle_info(handle, "Not get the processor object\n");
- else
- processor_validated_ids_update(object.processor.proc_id);
+ return false;
- return AE_OK;
+ switch (acpi_type) {
+ case ACPI_TYPE_PROCESSOR:
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ goto err;
+ uid = object.processor.proc_id;
+ break;
+
+ case ACPI_TYPE_DEVICE:
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+ if (ACPI_FAILURE(status))
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ processor_validated_ids_update(uid);
+ return true;
+
+err:
+ acpi_handle_info(handle, "Invalid processor object\n");
+ return false;
+
}
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
{
- /* Search all processor nodes in ACPI namespace */
+ /* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
acpi_processor_ids_walk,
NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+ NULL, NULL);
}
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
{
int i;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 80cb5eb..34fbe02 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
acpi_wakeup_device_init();
acpi_debugger_init();
acpi_setup_sb_notify_handler();
- acpi_set_processor_mapping();
return 0;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 611a558..b933061 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
}
static int map_lapic_id(struct acpi_subtable_header *entry,
- u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+ u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
- if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
- if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
- if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
* Retrieve the ARM CPU physical identifier (MPIDR)
*/
static int map_gicc_mpidr(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
{
struct acpi_madt_generic_interrupt *gicc =
container_of(entry, struct acpi_madt_generic_interrupt, header);
- if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
return -ENODEV;
/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
}
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
- int type, u32 acpi_id, bool ignore_disabled)
+ int type, u32 acpi_id)
{
unsigned long madt_end, entry;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
- if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
if (!madt)
return PHYS_CPUID_INVALID;
- rv = map_madt_entry(madt, 1, acpi_id, true);
+ rv = map_madt_entry(madt, 1, acpi_id);
acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
- bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
- map_gicc_mpidr(header, type, acpi_id, &phys_id,
- ignore_disabled);
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
- u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
phys_cpuid_t phys_id;
- phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+ phys_id = map_mat_entry(handle, type, acpi_id);
if (invalid_phys_cpuid(phys_id))
- phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
- ignore_disabled);
+ phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
return phys_id;
}
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
- return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
- int type, id;
- u32 acpi_id;
- acpi_status status;
- acpi_object_type acpi_type;
- unsigned long long tmp;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
- status = acpi_get_type(handle, &acpi_type);
- if (ACPI_FAILURE(status))
- return false;
-
- switch (acpi_type) {
- case ACPI_TYPE_PROCESSOR:
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = object.processor.proc_id;
-
- /* validate the acpi_id */
- if(acpi_processor_validate_proc_id(acpi_id))
- return false;
- break;
- case ACPI_TYPE_DEVICE:
- status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = tmp;
- break;
- default:
- return false;
- }
-
- type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
- *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
- id = acpi_map_cpuid(*phys_id, acpi_id);
-
- if (id < 0)
- return false;
- *cpuid = id;
- return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
- void **rv)
-{
- phys_cpuid_t phys_id;
- int cpu_id;
-
- if (!map_processor(handle, &phys_id, &cpu_id))
- return AE_ERROR;
-
- acpi_map_cpu2node(handle, cpu_id, phys_id);
- return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
- /* Set persistent cpu <-> node mapping for all processors. */
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, set_processor_node_mapping,
- NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
u64 *phys_addr, int *ioapic_id)
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 01c9466..3afa8c1 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
return true;
if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) &&
- h->oem_revision == 0)
+ h->oem_revision == 1)
return true;
return false;
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index bf43b5d..83f1439 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
{ .compatible = "img,boston-lcd", .data = &boston_config },
{ .compatible = "mti,malta-lcd", .data = &malta_config },
{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
+ { /* sentinel */ }
};
/**
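
Note: the img-ascii-lcd fix adds the customary terminating entry to the of_device_id table. The OF matching code walks the array until it hits an entry with an empty compatible/name, so a table without a sentinel runs off the end of the array. A minimal sketch of the convention ("vendor,demo-lcd" is a made-up compatible string):

	static const struct of_device_id demo_matches[] = {
		{ .compatible = "vendor,demo-lcd" },
		{ /* sentinel: the all-zero entry terminates the table */ }
	};
	MODULE_DEVICE_TABLE(of, demo_matches);
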
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 684bda4..6bb60fb 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
return restart_syscall();
}
-void assert_held_device_hotplug(void)
-{
- lockdep_assert_held(&device_hotplug_lock);
-}
-
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index c2c14a1..08e0545 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -344,7 +344,8 @@
config BT_QCOMSMD
tristate "Qualcomm SMD based HCI support"
- depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST
+ depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+ depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n)
select BT_QCA
help
Qualcomm SMD based HCI driver.
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 4a99ac7..9959c76 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
struct amd768_priv {
void __iomem *iobase;
struct pci_dev *pcidev;
+ u32 pmbase;
};
static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@ static int __init mod_init(void)
if (pmbase == 0)
return -EIO;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
- PMBASE_SIZE, DRV_NAME)) {
+ if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
pmbase + 0xF0);
- return -EBUSY;
+ err = -EBUSY;
+ goto out;
}
- priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
- PMBASE_SIZE);
+ priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
if (!priv->iobase) {
pr_err(DRV_NAME "Cannot map ioport\n");
- return -ENOMEM;
+ err = -EINVAL;
+ goto err_iomap;
}
amd_rng.priv = (unsigned long)priv;
+ priv->pmbase = pmbase;
priv->pcidev = pdev;
pr_info(DRV_NAME " detected\n");
- return devm_hwrng_register(&pdev->dev, &amd_rng);
+ err = hwrng_register(&amd_rng);
+ if (err) {
+ pr_err(DRV_NAME " registering failed (%d)\n", err);
+ goto err_hwrng;
+ }
+ return 0;
+
+err_hwrng:
+ ioport_unmap(priv->iobase);
+err_iomap:
+ release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+ kfree(priv);
+ return err;
}
static void __exit mod_exit(void)
{
+ struct amd768_priv *priv;
+
+ priv = (struct amd768_priv *)amd_rng.priv;
+
+ hwrng_unregister(&amd_rng);
+
+ ioport_unmap(priv->iobase);
+
+ release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+ kfree(priv);
}
module_init(mod_init);
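
Note: with the switch away from devm_* helpers, amd-rng has to unwind manually on every probe failure, in reverse order of acquisition, and repeat the same teardown in mod_exit(). Condensed, the error handling follows the classic goto ladder (the names below are placeholders, not the driver's actual symbols):

	static int __init demo_init(void)
	{
		struct demo_priv *priv;
		int err;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		if (!request_region(base, size, "demo")) {
			err = -EBUSY;
			goto out_free;
		}

		priv->iobase = ioport_map(base, size);
		if (!priv->iobase) {
			err = -EINVAL;
			goto out_release;
		}

		err = hwrng_register(&demo_rng);
		if (err)
			goto out_unmap;
		return 0;

	out_unmap:
		ioport_unmap(priv->iobase);
	out_release:
		release_region(base, size);
	out_free:
		kfree(priv);
		return err;
	}
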
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e7a2459..e1d421a 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -31,6 +31,9 @@
#include <linux/module.h>
#include <linux/pci.h>
+
+#define PFX KBUILD_MODNAME ": "
+
#define GEODE_RNG_DATA_REG 0x50
#define GEODE_RNG_STATUS_REG 0x54
@@ -82,6 +85,7 @@ static struct hwrng geode_rng = {
static int __init mod_init(void)
{
+ int err = -ENODEV;
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
void __iomem *mem;
@@ -89,27 +93,43 @@ static int __init mod_init(void)
for_each_pci_dev(pdev) {
ent = pci_match_id(pci_tbl, pdev);
- if (ent) {
- rng_base = pci_resource_start(pdev, 0);
- if (rng_base == 0)
- return -ENODEV;
-
- mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
- if (!mem)
- return -ENOMEM;
- geode_rng.priv = (unsigned long)mem;
-
- pr_info("AMD Geode RNG detected\n");
- return devm_hwrng_register(&pdev->dev, &geode_rng);
- }
+ if (ent)
+ goto found;
}
-
/* Device not found. */
- return -ENODEV;
+ goto out;
+
+found:
+ rng_base = pci_resource_start(pdev, 0);
+ if (rng_base == 0)
+ goto out;
+ err = -ENOMEM;
+ mem = ioremap(rng_base, 0x58);
+ if (!mem)
+ goto out;
+ geode_rng.priv = (unsigned long)mem;
+
+ pr_info("AMD Geode RNG detected\n");
+ err = hwrng_register(&geode_rng);
+ if (err) {
+ pr_err(PFX "RNG registering failed (%d)\n",
+ err);
+ goto err_unmap;
+ }
+out:
+ return err;
+
+err_unmap:
+ iounmap(mem);
+ goto out;
}
static void __exit mod_exit(void)
{
+ void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+ hwrng_unregister(&geode_rng);
+ iounmap(mem);
}
module_init(mod_init);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 2a558c7..3e73bcd 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -84,11 +84,14 @@ struct pp_struct {
struct ieee1284_info state;
struct ieee1284_info saved_state;
long default_inactivity;
+ int index;
};
/* should we use PARDEVICE_MAX here? */
static struct device *devices[PARPORT_MAX];
+static DEFINE_IDA(ida_index);
+
/* pp_struct.flags bitfields */
#define PP_CLAIMED (1<<0)
#define PP_EXCL (1<<1)
@@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp)
struct pardevice *pdev = NULL;
char *name;
struct pardev_cb ppdev_cb;
- int rc = 0;
+ int rc = 0, index;
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
@@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp)
goto err;
}
+ index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
ppdev_cb.irq_func = pp_irq;
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
ppdev_cb.private = pp;
- pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
+ pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
parport_put_port(port);
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
rc = -ENXIO;
+ ida_simple_remove(&ida_index, index);
goto err;
}
pp->pdev = pdev;
+ pp->index = index;
dev_dbg(&pdev->dev, "registered pardevice\n");
err:
kfree(name);
@@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file)
if (pp->pdev) {
parport_unregister_device(pp->pdev);
+ ida_simple_remove(&ida_index, pp->index);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
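
Note: the ppdev change decouples the pardevice number from the character-device minor by pulling a free index from an IDA at registration time and returning it on every exit path (failed registration and pp_release()). In isolation, the ida_simple_get()/ida_simple_remove() pairing looks like this (demo names are illustrative):

	static DEFINE_IDA(demo_ida);

	static int demo_register(void)
	{
		int index = ida_simple_get(&demo_ida, 0, 0, GFP_KERNEL);

		if (index < 0)
			return index;		/* -ENOMEM, -ENOSPC, ... */

		/* ... use index as the unique device number ... */
		return index;
	}

	static void demo_unregister(int index)
	{
		ida_simple_remove(&demo_ida, index);	/* index may now be reused */
	}
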
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0fb39fe..67201f6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
clk->core = hw->core;
clk->dev_id = dev_id;
- clk->con_id = con_id;
+ clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
clk->max_rate = ULONG_MAX;
clk_prepare_lock();
@@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk)
hlist_del(&clk->clks_node);
clk_prepare_unlock();
+ kfree_const(clk->con_id);
kfree(clk);
}
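
Note: the clk fix makes struct clk keep its own copy of con_id instead of borrowing the caller's pointer, so the handle no longer depends on the caller's string staying alive. kstrdup_const() avoids copying when the string lives in .rodata, and kfree_const() is its matching release. A sketch of the pairing (demo types are illustrative):

	struct demo_handle {
		const char *con_id;
	};

	static int demo_set_con_id(struct demo_handle *h, const char *con_id)
	{
		h->con_id = kstrdup_const(con_id, GFP_KERNEL);
		if (con_id && !h->con_id)
			return -ENOMEM;		/* only heap strings can fail to copy */
		return 0;
	}

	static void demo_release(struct demo_handle *h)
	{
		kfree_const(h->con_id);		/* no-op for .rodata literals */
	}
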
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 924f560..00d4150 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" };
PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" };
PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" };
-PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" };
+PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" };
PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
+ /*
+ * Make uart_pll_clk a child of the gpll, as all other sources are
+ * not that usable / stable.
+ */
+ writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10),
+ reg_base + RK2928_CLKSEL_CON(13));
+
ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 695bbf9..72109d2 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -80,6 +80,7 @@
select SUNXI_CCU_DIV
select SUNXI_CCU_NK
select SUNXI_CCU_NKM
+ select SUNXI_CCU_NKMP
select SUNXI_CCU_NM
select SUNXI_CCU_MP
select SUNXI_CCU_PHASE
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index e3c084c..f54114c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
/* Fixed Factor clocks */
-static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0);
+static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
/* We hardcode the divider to 4 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
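
Note: the osc12M change is an argument-order correction. Assuming the common clk-provider macro takes the divider before the multiplier, the fixed factor is rate = parent_rate * mult / div, so the old (1, 2) described a 48 MHz clock while (2, 1) gives the intended 24 MHz / 2 = 12 MHz:

	/* Assuming CLK_FIXED_FACTOR(_struct, _name, _parent, _div, _mult, _flags):
	 *   rate = parent_rate * mult / div
	 *   (1, 2): 24 MHz * 2 / 1 = 48 MHz   (wrong)
	 *   (2, 1): 24 MHz * 1 / 2 = 12 MHz   (intended)
	 */
	static CLK_FIXED_FACTOR(demo_osc12M, "demo-osc12M", "osc24M", 2, 1, 0);
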
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 4c9a920..89e68d2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
0x150, 0, 4, 24, 2, BIT(31),
CLK_SET_RATE_PARENT);
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 22c2ca7..b583f18 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
unsigned int m, p;
u32 reg;
+ /* Adjust parent_rate according to pre-dividers */
+ ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+ -1, &parent_rate);
+
reg = readl(cmp->common.base + cmp->common.reg);
m = reg >> cmp->m.shift;
@@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned int m, p;
u32 reg;
+ /* Adjust parent_rate according to pre-dividers */
+ ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+ -1, &parent_rate);
+
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index a2b40a0..488055e 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
p = reg >> nkmp->p.shift;
p &= (1 << nkmp->p.width) - 1;
- return parent_rate * n * k >> p / m;
+ return (parent_rate * n * k >> p) / m;
}
static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
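
Note: the ccu_nkmp change fixes a C operator-precedence bug. '/' binds tighter than '>>', so the old expression shifted by p / m (usually zero) instead of dividing the shifted product by m. A worked example with small numbers:

	/*   rate = parent_rate * n * k >> p / m;      parses as >> (p / m)
	 *   rate = (parent_rate * n * k >> p) / m;    shift first, then divide
	 *
	 * e.g. parent_rate = 24, n = 2, k = 1, p = 1, m = 3:
	 *   old: 48 >> (1 / 3) = 48 >> 0 = 48
	 *   new: (48 >> 1) / 3 = 24 / 3  = 8
	 */
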
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 745844e..d4ca996 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
/*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
return (upper << 16) | lower;
}
-static u32 tc_get_cv32(void)
-{
- return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
static u64 tc_get_cycles32(struct clocksource *cs)
{
- return tc_get_cv32();
+ return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}
static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace tc_read_sched_clock(void)
-{
- return tc_get_cv32();
-}
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
clksrc.read = tc_get_cycles32;
/* setup ony channel 0 */
tcb_setup_single_chan(tc, best_divisor_idx);
-
- /* register sched_clock on chips with single 32 bit counter */
- sched_clock_register(tc_read_sched_clock, 32, divided_rate);
} else {
/* tclib will give us three clocks no matter what the
* underlying platform supports.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 38b9fdf..5dbdd26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
/**
@@ -1182,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu)
for_each_cpu(j, policy->related_cpus)
per_cpu(cpufreq_cpu_data, j) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ } else {
+ policy->min = policy->user_policy.min;
+ policy->max = policy->user_policy.max;
}
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3d37219..283491f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
+static inline int32_t percent_ext_fp(int percent)
+{
+ return div_ext_fp(percent, 100);
+}
+
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -359,9 +364,7 @@ static bool driver_registered __read_mostly;
static bool acpi_ppc;
#endif
-static struct perf_limits performance_limits;
-static struct perf_limits powersave_limits;
-static struct perf_limits *limits;
+static struct perf_limits global;
static void intel_pstate_init_limits(struct perf_limits *limits)
{
@@ -372,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
limits->max_sysfs_pct = 100;
}
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
- intel_pstate_init_limits(limits);
- limits->min_perf_pct = 100;
- limits->min_perf = int_ext_tofp(1);
- limits->min_sysfs_pct = 100;
-}
-
static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);
@@ -502,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
* correct max turbo frequency based on the turbo state.
* Also need to convert to MHz as _PSS freq is in MHz.
*/
- if (!limits->turbo_disabled)
+ if (!global.turbo_disabled)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
@@ -621,7 +616,7 @@ static inline void update_turbo_state(void)
cpu = all_cpu_data[0];
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
- limits->turbo_disabled =
+ global.turbo_disabled =
(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
@@ -845,12 +840,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
- int min, hw_min, max, hw_max, cpu, range, adj_range;
- struct perf_limits *perf_limits = limits;
+ int min, hw_min, max, hw_max, cpu;
+ struct perf_limits *perf_limits = &global;
u64 value, cap;
for_each_cpu(cpu, policy->cpus) {
- int max_perf_pct, min_perf_pct;
struct cpudata *cpu_data = all_cpu_data[cpu];
s16 epp;
@@ -859,24 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap);
- if (limits->no_turbo)
+ if (global.no_turbo)
hw_max = HWP_GUARANTEED_PERF(cap);
else
hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
- max_perf_pct = perf_limits->max_perf_pct;
- min_perf_pct = perf_limits->min_perf_pct;
+ max = fp_ext_toint(hw_max * perf_limits->max_perf);
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min = max;
+ else
+ min = fp_ext_toint(hw_max * perf_limits->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- adj_range = min_perf_pct * range / 100;
- min = hw_min + adj_range;
+
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- adj_range = max_perf_pct * range / 100;
- max = hw_min + adj_range;
-
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
@@ -969,26 +961,18 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
}
static void intel_pstate_update_policies(void)
- __releases(&intel_pstate_limits_lock)
- __acquires(&intel_pstate_limits_lock)
{
- struct perf_limits *saved_limits = limits;
int cpu;
- mutex_unlock(&intel_pstate_limits_lock);
-
for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu);
-
- mutex_lock(&intel_pstate_limits_lock);
-
- limits = saved_limits;
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
*(u32 *)data = val;
+ pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
intel_pstate_reset_all_pid();
return 0;
}
@@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
static ssize_t show_##file_name \
(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
- return sprintf(buf, "%u\n", limits->object); \
+ return sprintf(buf, "%u\n", global.object); \
}
static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
}
update_turbo_state();
- if (limits->turbo_disabled)
- ret = sprintf(buf, "%u\n", limits->turbo_disabled);
+ if (global.turbo_disabled)
+ ret = sprintf(buf, "%u\n", global.turbo_disabled);
else
- ret = sprintf(buf, "%u\n", limits->no_turbo);
+ ret = sprintf(buf, "%u\n", global.no_turbo);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_limits_lock);
update_turbo_state();
- if (limits->turbo_disabled) {
+ if (global.turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
}
- limits->no_turbo = clamp_t(int, input, 0, 1);
-
- intel_pstate_update_policies();
+ global.no_turbo = clamp_t(int, input, 0, 1);
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_limits_lock);
- limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
- limits->max_perf_pct = min(limits->max_policy_pct,
- limits->max_sysfs_pct);
- limits->max_perf_pct = max(limits->min_policy_pct,
- limits->max_perf_pct);
- limits->max_perf_pct = max(limits->min_perf_pct,
- limits->max_perf_pct);
- limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-
- intel_pstate_update_policies();
+ global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+ global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
+ global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
+ global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
+ global.max_perf = percent_ext_fp(global.max_perf_pct);
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
mutex_lock(&intel_pstate_limits_lock);
- limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
- limits->min_perf_pct = max(limits->min_policy_pct,
- limits->min_sysfs_pct);
- limits->min_perf_pct = min(limits->max_policy_pct,
- limits->min_perf_pct);
- limits->min_perf_pct = min(limits->max_perf_pct,
- limits->min_perf_pct);
- limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-
- intel_pstate_update_policies();
+ global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+ global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
+ global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
+ global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
+ global.min_perf = percent_ext_fp(global.min_perf_pct);
mutex_unlock(&intel_pstate_limits_lock);
+ intel_pstate_update_policies();
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
u32 vid;
val = (u64)pstate << 8;
- if (limits->no_turbo && !limits->turbo_disabled)
+ if (global.no_turbo && !global.turbo_disabled)
val |= (u64)1 << 32;
vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
u64 val;
val = (u64)pstate << 8;
- if (limits->no_turbo && !limits->turbo_disabled)
+ if (global.no_turbo && !global.turbo_disabled)
val |= (u64)1 << 32;
return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
int max_perf = cpu->pstate.turbo_pstate;
int max_perf_adj;
int min_perf;
- struct perf_limits *perf_limits = limits;
+ struct perf_limits *perf_limits = &global;
- if (limits->no_turbo || limits->turbo_disabled)
+ if (global.no_turbo || global.turbo_disabled)
max_perf = cpu->pstate.max_pstate;
if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
sample->busy_scaled = busy_frac * 100;
- target = limits->no_turbo || limits->turbo_disabled ?
+ target = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
target += target >> 2;
target = mul_fp(target, busy_frac);
@@ -2080,36 +2058,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
struct perf_limits *limits)
{
+ int32_t max_policy_perf, min_policy_perf;
- limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
- policy->cpuinfo.max_freq);
- limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+ max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+ max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
if (policy->max == policy->min) {
- limits->min_policy_pct = limits->max_policy_pct;
+ min_policy_perf = max_policy_perf;
} else {
- limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
- policy->cpuinfo.max_freq);
- limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
- 0, 100);
+ min_policy_perf = div_ext_fp(policy->min,
+ policy->cpuinfo.max_freq);
+ min_policy_perf = clamp_t(int32_t, min_policy_perf,
+ 0, max_policy_perf);
}
- /* Normalize user input to [min_policy_pct, max_policy_pct] */
- limits->min_perf_pct = max(limits->min_policy_pct,
- limits->min_sysfs_pct);
- limits->min_perf_pct = min(limits->max_policy_pct,
- limits->min_perf_pct);
- limits->max_perf_pct = min(limits->max_policy_pct,
- limits->max_sysfs_pct);
- limits->max_perf_pct = max(limits->min_policy_pct,
- limits->max_perf_pct);
+ /* Normalize user input to [min_perf, max_perf] */
+ limits->min_perf = max(min_policy_perf,
+ percent_ext_fp(limits->min_sysfs_pct));
+ limits->min_perf = min(limits->min_perf, max_policy_perf);
+ limits->max_perf = min(max_policy_perf,
+ percent_ext_fp(limits->max_sysfs_pct));
+ limits->max_perf = max(min_policy_perf, limits->max_perf);
- /* Make sure min_perf_pct <= max_perf_pct */
- limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+ /* Make sure min_perf <= max_perf */
+ limits->min_perf = min(limits->min_perf, limits->max_perf);
- limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
- limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+ limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+ limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
limits->max_perf_pct, limits->min_perf_pct);
@@ -2118,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
- struct perf_limits *perf_limits = NULL;
+ struct perf_limits *perf_limits = &global;
if (!policy->cpuinfo.max_freq)
return -ENODEV;
@@ -2141,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
- pr_debug("set performance\n");
- if (!perf_limits) {
- limits = &performance_limits;
- perf_limits = limits;
- }
- } else {
- pr_debug("set powersave\n");
- if (!perf_limits) {
- limits = &powersave_limits;
- perf_limits = limits;
- }
-
- }
-
intel_pstate_update_perf_limits(policy, perf_limits);
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2179,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- struct perf_limits *perf_limits;
-
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
- perf_limits = &performance_limits;
- else
- perf_limits = &powersave_limits;
update_turbo_state();
- policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
- perf_limits->no_turbo ?
+ policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
cpu->pstate.max_freq :
cpu->pstate.turbo_freq;
@@ -2203,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
unsigned int max_freq, min_freq;
max_freq = policy->cpuinfo.max_freq *
- perf_limits->max_sysfs_pct / 100;
+ global.max_sysfs_pct / 100;
min_freq = policy->cpuinfo.max_freq *
- perf_limits->min_sysfs_pct / 100;
+ global.min_sysfs_pct / 100;
cpufreq_verify_within_limits(policy, min_freq, max_freq);
}
@@ -2257,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
update_turbo_state();
- policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2277,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
return ret;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
- if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+ if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2303,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
struct cpudata *cpu = all_cpu_data[policy->cpu];
update_turbo_state();
- policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
cpufreq_verify_within_cpu_limits(policy);
@@ -2311,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
return 0;
}
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
- struct cpufreq_policy *policy,
- unsigned int target_freq)
-{
- unsigned int max_freq;
-
- update_turbo_state();
-
- max_freq = limits->no_turbo || limits->turbo_disabled ?
- cpu->pstate.max_freq : cpu->pstate.turbo_freq;
- policy->cpuinfo.max_freq = max_freq;
- if (policy->max > max_freq)
- policy->max = max_freq;
-
- if (target_freq > max_freq)
- target_freq = max_freq;
-
- return target_freq;
-}
-
static int intel_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -2339,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
int target_pstate;
+ update_turbo_state();
+
freqs.old = policy->cur;
- freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+ freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
switch (relation) {
@@ -2372,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
- target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+ update_turbo_state();
+
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -2427,13 +2364,7 @@ static int intel_pstate_register_driver(void)
{
int ret;
- intel_pstate_init_limits(&powersave_limits);
- intel_pstate_set_performance_limits(&performance_limits);
- if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
- intel_pstate_driver == &intel_pstate)
- limits = &performance_limits;
- else
- limits = &powersave_limits;
+ intel_pstate_init_limits(&global);
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index c5adc8c..ae948b1 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
+ /*
+ * Return if cpu_device is not set up for this CPU.
+ *
+ * This could happen if the arch did not set up cpu_device
+ * since this CPU is not in the cpu_present mask and the
+ * driver did not send a correct CPU mask during registration.
+ * Without this check we would end up passing a bogus
+ * value for &cpu_dev->kobj in kobject_init_and_add().
+ */
+ if (!cpu_dev)
+ return -ENODEV;
+
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return -ENOMEM;
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 511ab04..92d1c69 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version);
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
- struct ccp_device *ccp = ccp_get_device();
+ struct ccp_device *ccp;
unsigned long flags;
unsigned int i;
int ret;
+ /* Some commands might need to be sent to a specific device */
+ ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
if (!ccp)
return -ENODEV;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e5d9278f..8d0eeb4 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
goto err;
ccp_cmd = &cmd->ccp_cmd;
+ ccp_cmd->ccp = chan->ccp;
ccp_pt = &ccp_cmd->u.passthru_nomap;
ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 8d9829f..80c6db279 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
int rc = VM_FAULT_SIGBUS;
phys_addr_t phys;
pfn_t pfn;
+ unsigned int fault_size = PAGE_SIZE;
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size != dax_region->align)
+ return VM_FAULT_SIGBUS;
+
phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
vmf->pgoff);
return VM_FAULT_SIGBUS;
}
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
+ unsigned int fault_size = PMD_SIZE;
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pmd_addr < vmf->vma->vm_start ||
+ (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
pgoff);
return VM_FAULT_SIGBUS;
}
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
+ unsigned int fault_size = PUD_SIZE;
+
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pud_addr < vmf->vma->vm_start ||
+ (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
pgoff = linear_page_index(vmf->vma, pud_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
pgoff);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 82d85cce..4773f28 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -43,6 +43,7 @@
config EDAC_DEBUG
bool "Debugging"
+ select DEBUG_FS
help
This turns on debugging information for the entire EDAC subsystem.
You do so by inserting edac_module with "edac_debug_level=x." Valid
@@ -259,6 +260,15 @@
Support for error detection and correction the Intel
Skylake server Integrated Memory Controllers.
+config EDAC_PND2
+ tristate "Intel Pondicherry2"
+ depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+ help
+ Support for error detection and correction on the Intel
+ Pondicherry2 Integrated Memory Controller. This SoC IP is
+ first used on the Apollo Lake platform and Denverton
+ micro-server but may appear on others in the future.
+
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 88e472e..587107e 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -32,6 +32,7 @@
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
obj-$(CONFIG_EDAC_SKX) += skx_edac.o
+obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 1670d27..f683919 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
dimm->mtype = MEM_FB_DDR2;
/* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
+ if (MTR_DRAM_WIDTH(mtr) == 8)
dimm->dtype = DEV_X8;
else
dimm->dtype = DEV_X4;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index abf6ef2..37a9ba7 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
dimm->nr_pages = size_mb << 8;
dimm->grain = 8;
- dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
+ DEV_X8 : DEV_X4;
dimm->mtype = MEM_FB_DDR2;
/*
* The eccc mechanism is SDDC (aka SECC), with
* is similar to Chipkill.
*/
- dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
EDAC_S8ECD8ED : EDAC_S4ECD4ED;
ndimms++;
}
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
new file mode 100644
index 0000000..928e0db
--- /dev/null
+++ b/drivers/edac/pnd2_edac.c
@@ -0,0 +1,1546 @@
+/*
+ * Driver for Pondicherry2 memory controller.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * [Derived from sb_edac.c]
+ *
+ * Translation of system physical addresses to DIMM addresses
+ * is a two stage process:
+ *
+ * First the Pondicherry 2 memory controller handles slice and channel interleaving
+ * in "sys2pmi()". This is (almost) completley common between platforms.
+ *
+ * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
+ * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_mc.h"
+#include "edac_module.h"
+#include "pnd2_edac.h"
+
+#define APL_NUM_CHANNELS 4
+#define DNV_NUM_CHANNELS 2
+#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
+
+enum type {
+ APL,
+ DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
+};
+
+struct dram_addr {
+ int chan;
+ int dimm;
+ int rank;
+ int bank;
+ int row;
+ int col;
+};
+
+struct pnd2_pvt {
+ int dimm_geom[APL_NUM_CHANNELS];
+ u64 tolm, tohm;
+};
+
+/*
+ * System address space is divided into multiple regions with
+ * different interleave rules in each. The as0/as1 regions
+ * have no interleaving at all. The as2 region is interleaved
+ * between two channels. The mot region is magic and may overlap
+ * other regions, with its interleave rules taking precedence.
+ * Addresses not in any of these regions are interleaved across
+ * all four channels.
+ */
+static struct region {
+ u64 base;
+ u64 limit;
+ u8 enabled;
+} mot, as0, as1, as2;
+
+static struct dunit_ops {
+ char *name;
+ enum type type;
+ int pmiaddr_shift;
+ int pmiidx_shift;
+ int channels;
+ int dimms_per_channel;
+ int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
+ int (*get_registers)(void);
+ int (*check_ecc)(void);
+ void (*mk_region)(char *name, struct region *rp, void *asym);
+ void (*get_dimm_config)(struct mem_ctl_info *mci);
+ int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+ struct dram_addr *daddr, char *msg);
+} *ops;
+
+static struct mem_ctl_info *pnd2_mci;
+
+#define PND2_MSG_SIZE 256
+
+/* Debug macros */
+#define pnd2_printk(level, fmt, arg...) \
+ edac_printk(level, "pnd2", fmt, ##arg)
+
+#define pnd2_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
+
+#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
+#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
+#define SELECTOR_DISABLED (-1)
+#define _4GB (1ul << 32)
+
+#define PMI_ADDRESS_WIDTH 31
+#define PND_MAX_PHYS_BIT 39
+
+#define APL_ASYMSHIFT 28
+#define DNV_ASYMSHIFT 31
+#define CH_HASH_MASK_LSB 6
+#define SLICE_HASH_MASK_LSB 6
+#define MOT_SLC_INTLV_BIT 12
+#define LOG2_PMI_ADDR_GRANULARITY 5
+#define MOT_SHIFT 24
+
+#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
+#define U64_LSHIFT(val, s) ((u64)(val) << (s))
+
+#ifdef CONFIG_X86_INTEL_SBI_APL
+#include "linux/platform_data/sbi_apl.h"
+int sbi_send(int port, int off, int op, u32 *data)
+{
+ struct sbi_apl_message sbi_arg;
+ int ret, read = 0;
+
+ memset(&sbi_arg, 0, sizeof(sbi_arg));
+
+ if (op == 0 || op == 4 || op == 6)
+ read = 1;
+ else
+ sbi_arg.data = *data;
+
+ sbi_arg.opcode = op;
+ sbi_arg.port_address = port;
+ sbi_arg.register_offset = off;
+ ret = sbi_apl_commit(&sbi_arg);
+ if (ret || sbi_arg.status)
+ edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
+ sbi_arg.status, ret, sbi_arg.data);
+
+ if (ret == 0)
+ ret = sbi_arg.status;
+
+ if (ret == 0 && read)
+ *data = sbi_arg.data;
+
+ return ret;
+}
+#else
+int sbi_send(int port, int off, int op, u32 *data)
+{
+ return -EUNATCH;
+}
+#endif
+
+static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+{
+ int ret = 0;
+
+ edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
+ switch (sz) {
+ case 8:
+ ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
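+ /* fall through - the 4-byte case reads the low 32 bits */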
+ case 4:
+ ret = sbi_send(port, off, op, (u32 *)data);
+ pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
+ sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 get_mem_ctrl_hub_base_addr(void)
+{
+ struct b_cr_mchbar_lo_pci lo;
+ struct b_cr_mchbar_hi_pci hi;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+ if (pdev) {
+ pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
+ pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
+ pci_dev_put(pdev);
+ } else {
+ return 0;
+ }
+
+ if (!lo.enable) {
+ edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
+ return 0;
+ }
+
+ return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
+}
+
+static u64 get_sideband_reg_base_addr(void)
+{
+ struct pci_dev *pdev;
+ u32 hi, lo;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
+ if (pdev) {
+ pci_read_config_dword(pdev, 0x10, &lo);
+ pci_read_config_dword(pdev, 0x14, &hi);
+ pci_dev_put(pdev);
+ return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
+ } else {
+ return 0xfd000000;
+ }
+}
+
+static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+{
+ struct pci_dev *pdev;
+ char *base;
+ u64 addr;
+
+ if (op == 4) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+ if (!pdev)
+ return -ENODEV;
+
+ pci_read_config_dword(pdev, off, data);
+ pci_dev_put(pdev);
+ } else {
+ /* MMIO via memory controller hub base address */
+ if (op == 0 && port == 0x4c) {
+ addr = get_mem_ctrl_hub_base_addr();
+ if (!addr)
+ return -ENODEV;
+ } else {
+ /* MMIO via sideband register base address */
+ addr = get_sideband_reg_base_addr();
+ if (!addr)
+ return -ENODEV;
+ addr += (port << 16);
+ }
+
+ base = ioremap((resource_size_t)addr, 0x10000);
+ if (!base)
+ return -ENODEV;
+
+ if (sz == 8)
+ *(u32 *)(data + 4) = *(u32 *)(base + off + 4);
+ *(u32 *)data = *(u32 *)(base + off);
+
+ iounmap(base);
+ }
+
+ edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
+ (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
+
+ return 0;
+}
+
+#define RD_REGP(regp, regname, port) \
+ ops->rd_reg(port, \
+ regname##_offset, \
+ regname##_r_opcode, \
+ regp, sizeof(struct regname), \
+ #regname)
+
+#define RD_REG(regp, regname) \
+ ops->rd_reg(regname ## _port, \
+ regname##_offset, \
+ regname##_r_opcode, \
+ regp, sizeof(struct regname), \
+ #regname)
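+
+/*
+ * e.g. RD_REG(&tolud, b_cr_tolud_pci) expands to
+ * ops->rd_reg(0x4c, 0xbc, 0x04, &tolud, sizeof(struct b_cr_tolud_pci),
+ * "b_cr_tolud_pci"), using the per-register constants from pnd2_edac.h.
+ */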
+
+static u64 top_lm, top_hm;
+static bool two_slices;
+static bool two_channels; /* Both PMI channels in one slice enabled */
+
+static u8 sym_chan_mask;
+static u8 asym_chan_mask;
+static u8 chan_mask;
+
+static int slice_selector = -1;
+static int chan_selector = -1;
+static u64 slice_hash_mask;
+static u64 chan_hash_mask;
+
+static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
+{
+ rp->enabled = 1;
+ rp->base = base;
+ rp->limit = limit;
+ edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
+}
+
+static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
+{
+ if (mask == 0) {
+ pr_info(FW_BUG "MOT mask cannot be zero\n");
+ return;
+ }
+ if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
+ pr_info(FW_BUG "MOT mask not power of two\n");
+ return;
+ }
+ if (base & ~mask) {
+ pr_info(FW_BUG "MOT region base/mask alignment error\n");
+ return;
+ }
+ rp->base = base;
+ rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
+ rp->enabled = 1;
+ edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
+}
+
+static bool in_region(struct region *rp, u64 addr)
+{
+ if (!rp->enabled)
+ return false;
+
+ return rp->base <= addr && addr <= rp->limit;
+}
+
+static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
+{
+ int mask = 0;
+
+ if (!p->slice_0_mem_disabled)
+ mask |= p->sym_slice0_channel_enabled;
+
+ if (!p->slice_1_disabled)
+ mask |= p->sym_slice1_channel_enabled << 2;
+
+ if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
+ mask &= 0x5;
+
+ return mask;
+}
+
+static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
+ struct b_cr_asym_mem_region0_mchbar *as0,
+ struct b_cr_asym_mem_region1_mchbar *as1,
+ struct b_cr_asym_2way_mem_region_mchbar *as2way)
+{
+ const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ int mask = 0;
+
+ if (as2way->asym_2way_interleave_enable)
+ mask = intlv[as2way->asym_2way_intlv_mode];
+ if (as0->slice0_asym_enable)
+ mask |= (1 << as0->slice0_asym_channel_select);
+ if (as1->slice1_asym_enable)
+ mask |= (4 << as1->slice1_asym_channel_select);
+ if (p->slice_0_mem_disabled)
+ mask &= 0xc;
+ if (p->slice_1_disabled)
+ mask &= 0x3;
+ if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
+ mask &= 0x5;
+
+ return mask;
+}
+
+static struct b_cr_tolud_pci tolud;
+static struct b_cr_touud_lo_pci touud_lo;
+static struct b_cr_touud_hi_pci touud_hi;
+static struct b_cr_asym_mem_region0_mchbar asym0;
+static struct b_cr_asym_mem_region1_mchbar asym1;
+static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
+static struct b_cr_mot_out_base_mchbar mot_base;
+static struct b_cr_mot_out_mask_mchbar mot_mask;
+static struct b_cr_slice_channel_hash chash;
+
+/* Apollo Lake dunit */
+/*
+ * Validated on board with just two DIMMs in the [0] and [2] positions
+ * in this array. Other port number matches documentation, but caution
+ * advised.
+ */
+static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
+static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
+
+/* Denverton dunit */
+static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
+static struct d_cr_dsch dsch;
+static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
+static struct d_cr_drp drp[DNV_NUM_CHANNELS];
+static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
+static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
+static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
+static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
+static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
+static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
+
+static void apl_mk_region(char *name, struct region *rp, void *asym)
+{
+ struct b_cr_asym_mem_region0_mchbar *a = asym;
+
+ mk_region(name, rp,
+ U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
+ U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
+ GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
+}
+
+static void dnv_mk_region(char *name, struct region *rp, void *asym)
+{
+ struct b_cr_asym_mem_region_denverton *a = asym;
+
+ mk_region(name, rp,
+ U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
+ U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
+ GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
+}
+
+static int apl_get_registers(void)
+{
+ int i;
+
+ if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
+ return -ENODEV;
+
+ for (i = 0; i < APL_NUM_CHANNELS; i++)
+ if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int dnv_get_registers(void)
+{
+ int i;
+
+ if (RD_REG(&dsch, d_cr_dsch))
+ return -ENODEV;
+
+ for (i = 0; i < DNV_NUM_CHANNELS; i++)
+ if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
+ RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
+ RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
+ RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
+ RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
+ RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
+ RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
+ RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Read all the h/w config registers once here (they don't
+ * change at run time). Figure out which address ranges have
+ * which interleave characteristics.
+ */
+static int get_registers(void)
+{
+ const int intlv[] = { 10, 11, 12, 12 };
+
+ if (RD_REG(&tolud, b_cr_tolud_pci) ||
+ RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
+ RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
+ RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
+ RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
+ RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
+ RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
+ RD_REG(&chash, b_cr_slice_channel_hash))
+ return -ENODEV;
+
+ if (ops->get_registers())
+ return -ENODEV;
+
+ if (ops->type == DNV) {
+ /* PMI channel idx (always 0) for asymmetric region */
+ asym0.slice0_asym_channel_select = 0;
+ asym1.slice1_asym_channel_select = 0;
+ /* PMI channel bitmap (always 1) for symmetric region */
+ chash.sym_slice0_channel_enabled = 0x1;
+ chash.sym_slice1_channel_enabled = 0x1;
+ }
+
+ if (asym0.slice0_asym_enable)
+ ops->mk_region("as0", &as0, &asym0);
+
+ if (asym1.slice1_asym_enable)
+ ops->mk_region("as1", &as1, &asym1);
+
+ if (asym_2way.asym_2way_interleave_enable) {
+ mk_region("as2way", &as2,
+ U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
+ U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
+ GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
+ }
+
+ if (mot_base.imr_en) {
+ mk_region_mask("mot", &mot,
+ U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
+ U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
+ }
+
+ top_lm = U64_LSHIFT(tolud.tolud, 20);
+ top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
+
+ two_slices = !chash.slice_1_disabled &&
+ !chash.slice_0_mem_disabled &&
+ (chash.sym_slice0_channel_enabled != 0) &&
+ (chash.sym_slice1_channel_enabled != 0);
+ two_channels = !chash.ch_1_disabled &&
+ !chash.enable_pmi_dual_data_mode &&
+ ((chash.sym_slice0_channel_enabled == 3) ||
+ (chash.sym_slice1_channel_enabled == 3));
+
+ sym_chan_mask = gen_sym_mask(&chash);
+ asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
+ chan_mask = sym_chan_mask | asym_chan_mask;
+
+ if (two_slices && !two_channels) {
+ if (chash.hvm_mode)
+ slice_selector = 29;
+ else
+ slice_selector = intlv[chash.interleave_mode];
+ } else if (!two_slices && two_channels) {
+ if (chash.hvm_mode)
+ chan_selector = 29;
+ else
+ chan_selector = intlv[chash.interleave_mode];
+ } else if (two_slices && two_channels) {
+ if (chash.hvm_mode) {
+ slice_selector = 29;
+ chan_selector = 30;
+ } else {
+ slice_selector = intlv[chash.interleave_mode];
+ chan_selector = intlv[chash.interleave_mode] + 1;
+ }
+ }
+
+ if (two_slices) {
+ if (!chash.hvm_mode)
+ slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
+ if (!two_channels)
+ slice_hash_mask |= BIT_ULL(slice_selector);
+ }
+
+ if (two_channels) {
+ if (!chash.hvm_mode)
+ chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
+ if (!two_slices)
+ chan_hash_mask |= BIT_ULL(chan_selector);
+ }
+
+ return 0;
+}
+
+/* Get a contiguous memory address (remove the MMIO gap) */
+static u64 remove_mmio_gap(u64 sys)
+{
+ return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
+}
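+
+/*
+ * Worked example (illustrative values): with top_lm = 2 GiB, addresses
+ * below 2 GiB map 1:1 and a 5 GiB system address maps to
+ * 5 GiB - (4 GiB - 2 GiB) = 3 GiB, removing the 2 GiB..4 GiB MMIO hole.
+ */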
+
+/* Squeeze out one address bit, shift upper part down to fill gap */
+static void remove_addr_bit(u64 *addr, int bitidx)
+{
+ u64 mask;
+
+ if (bitidx == -1)
+ return;
+
+ mask = (1ull << bitidx) - 1;
+ *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
+}
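+
+/*
+ * Example (illustrative): remove_addr_bit(&a, 3) with a = 0b101101
+ * squeezes out bit 3 and shifts the upper bits down, leaving 0b10101.
+ */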
+
+/* XOR all the bits from addr specified in mask */
+static int hash_by_mask(u64 addr, u64 mask)
+{
+ u64 result = addr & mask;
+
+ result = (result >> 32) ^ result;
+ result = (result >> 16) ^ result;
+ result = (result >> 8) ^ result;
+ result = (result >> 4) ^ result;
+ result = (result >> 2) ^ result;
+ result = (result >> 1) ^ result;
+
+ return (int)result & 1;
+}
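+
+/*
+ * Example (illustrative): hash_by_mask(0x35, 0x0f) folds 0x35 & 0x0f =
+ * 0b0101 down to its parity and returns 0 (an even number of set bits).
+ */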
+
+/*
+ * First stage decode. Take the system address and figure out which
+ * second stage will deal with it based on interleave modes.
+ */
+static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
+{
+ u64 contig_addr, contig_base, contig_offset, contig_base_adj;
+ int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
+ MOT_CHAN_INTLV_BIT_1SLC_2CH;
+ int slice_intlv_bit_rm = SELECTOR_DISABLED;
+ int chan_intlv_bit_rm = SELECTOR_DISABLED;
+ /* Determine if address is in the MOT region. */
+ bool mot_hit = in_region(&mot, addr);
+ /* Calculate the number of symmetric channels enabled. */
+ int sym_channels = hweight8(sym_chan_mask);
+
+ /*
+ * The amount we need to shift the asym base can be determined by the
+ * number of enabled symmetric channels.
+ * NOTE: This can only work because symmetric memory is not supposed
+ * to do a 3-way interleave.
+ */
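+ /* e.g. four enabled symmetric channels -> shift by 2, two -> 1, one -> 0 */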
+ int sym_chan_shift = sym_channels >> 1;
+
+ /* Give up if address is out of range, or in MMIO gap */
+ if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
+ (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
+ snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
+ return -EINVAL;
+ }
+
+ /* Get a contiguous memory address (remove the MMIO gap) */
+ contig_addr = remove_mmio_gap(addr);
+
+ if (in_region(&as0, addr)) {
+ *pmiidx = asym0.slice0_asym_channel_select;
+
+ contig_base = remove_mmio_gap(as0.base);
+ contig_offset = contig_addr - contig_base;
+ contig_base_adj = (contig_base >> sym_chan_shift) *
+ ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
+ contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
+ } else if (in_region(&as1, addr)) {
+ *pmiidx = 2u + asym1.slice1_asym_channel_select;
+
+ contig_base = remove_mmio_gap(as1.base);
+ contig_offset = contig_addr - contig_base;
+ contig_base_adj = (contig_base >> sym_chan_shift) *
+ ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
+ contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
+ } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
+ bool channel1;
+
+ mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
+ *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
+ channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
+ hash_by_mask(contig_addr, chan_hash_mask);
+ *pmiidx |= (u32)channel1;
+
+ contig_base = remove_mmio_gap(as2.base);
+ chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
+ contig_offset = contig_addr - contig_base;
+ remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
+ contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
+ } else {
+ /* Otherwise we're in normal, boring symmetric mode. */
+ *pmiidx = 0u;
+
+ if (two_slices) {
+ bool slice1;
+
+ if (mot_hit) {
+ slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
+ slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
+ } else {
+ slice_intlv_bit_rm = slice_selector;
+ slice1 = hash_by_mask(addr, slice_hash_mask);
+ }
+
+ *pmiidx = (u32)slice1 << 1;
+ }
+
+ if (two_channels) {
+ bool channel1;
+
+ mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
+ MOT_CHAN_INTLV_BIT_1SLC_2CH;
+
+ if (mot_hit) {
+ chan_intlv_bit_rm = mot_intlv_bit;
+ channel1 = (addr >> mot_intlv_bit) & 1;
+ } else {
+ chan_intlv_bit_rm = chan_selector;
+ channel1 = hash_by_mask(contig_addr, chan_hash_mask);
+ }
+
+ *pmiidx |= (u32)channel1;
+ }
+ }
+
+ /* Remove the chan_selector bit first */
+ remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
+ /* Remove the slice bit (we remove it second because it must be lower) */
+ remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
+ *pmiaddr = contig_addr;
+
+ return 0;
+}
+
+/* Translate PMI address to memory (rank, row, bank, column) */
+#define C(n) (0x10 | (n)) /* column */
+#define B(n) (0x20 | (n)) /* bank */
+#define R(n) (0x40 | (n)) /* row */
+#define RS (0x80) /* rank */
+
+/* addrdec values */
+#define AMAP_1KB 0
+#define AMAP_2KB 1
+#define AMAP_4KB 2
+#define AMAP_RSVD 3
+
+/* dden values */
+#define DEN_4Gb 0
+#define DEN_8Gb 2
+
+/* dwid values */
+#define X8 0
+#define X16 1
+
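+/*
+ * Each dimm_geometry entry below maps PMI address bits to column (C),
+ * bank (B), row (R) and rank select (RS) bits. In the first entry, for
+ * example, PMI address bit 0 feeds column bit 2, bit 5 feeds bank bit 0
+ * and bit 23 is the rank select.
+ */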
+static struct dimm_geometry {
+ u8 addrdec;
+ u8 dden;
+ u8 dwid;
+ u8 rowbits, colbits;
+ u16 bits[PMI_ADDRESS_WIDTH];
+} dimms[] = {
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
+ .rowbits = 15, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ 0, 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 11,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
+ R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
+ R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
+ R(14), R(15), 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
+ .rowbits = 15, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ 0, 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 11,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
+ R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
+ R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
+ R(14), R(15), 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
+ .rowbits = 15, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
+ 0, 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
+ .rowbits = 16, .colbits = 10,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
+ R(15), 0, 0, 0
+ }
+ },
+ {
+ .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
+ .rowbits = 16, .colbits = 11,
+ .bits = {
+ C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
+ B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
+ R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
+ R(14), R(15), 0, 0
+ }
+ }
+};
+
+static int bank_hash(u64 pmiaddr, int idx, int shft)
+{
+ int bhash = 0;
+
+ switch (idx) {
+ case 0:
+ bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
+ break;
+ case 1:
+ bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
+ bhash ^= ((pmiaddr >> 22) & 1) << 1;
+ break;
+ case 2:
+ bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
+ break;
+ }
+
+ return bhash;
+}
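+
+/*
+ * e.g. with a 1KB address map (shft == 0), bank_hash(pmiaddr, 0, 0)
+ * XORs PMI address bits 12 and 9 into bank bit 0.
+ */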
+
+static int rank_hash(u64 pmiaddr)
+{
+ return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
+}
+
+/* Second stage decode. Compute rank, bank, row & column. */
+static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+ struct dram_addr *daddr, char *msg)
+{
+ struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
+ struct pnd2_pvt *pvt = mci->pvt_info;
+ int g = pvt->dimm_geom[pmiidx];
+ struct dimm_geometry *d = &dimms[g];
+ int column = 0, bank = 0, row = 0, rank = 0;
+ int i, idx, type, skiprs = 0;
+
+ for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
+ int bit = (pmiaddr >> i) & 1;
+
+ if (i + skiprs >= PMI_ADDRESS_WIDTH) {
+ snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
+ return -EINVAL;
+ }
+
+ type = d->bits[i + skiprs] & ~0xf;
+ idx = d->bits[i + skiprs] & 0xf;
+
+ /*
+ * On single rank DIMMs ignore the rank select bit
+ * and shift remainder of "bits[]" down one place.
+ */
+ if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
+ skiprs = 1;
+ type = d->bits[i + skiprs] & ~0xf;
+ idx = d->bits[i + skiprs] & 0xf;
+ }
+
+ switch (type) {
+ case C(0):
+ column |= (bit << idx);
+ break;
+ case B(0):
+ bank |= (bit << idx);
+ if (cr_drp0->bahen)
+ bank ^= bank_hash(pmiaddr, idx, d->addrdec);
+ break;
+ case R(0):
+ row |= (bit << idx);
+ break;
+ case RS:
+ rank = bit;
+ if (cr_drp0->rsien)
+ rank ^= rank_hash(pmiaddr);
+ break;
+ default:
+ if (bit) {
+ snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
+ return -EINVAL;
+ }
+ goto done;
+ }
+ }
+
+done:
+ daddr->col = column;
+ daddr->bank = bank;
+ daddr->row = row;
+ daddr->rank = rank;
+ daddr->dimm = 0;
+
+ return 0;
+}
+
+/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
+#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
+
+static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
+ struct dram_addr *daddr, char *msg)
+{
+ /* Rank 0 or 1 */
+ daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
+ /* Rank 2 or 3 */
+ daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
+
+ /*
+ * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
+ * flip them if DIMM1 is larger than DIMM0.
+ */
+ daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
+
+ daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
+ daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
+ daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
+ if (dsch.ddr4en)
+ daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
+ if (dmap1[pmiidx].bxor) {
+ if (dsch.ddr4en) {
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
+ if (dsch.chan_width == 0)
+ /* 64/72 bit dram channel width */
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
+ else
+ /* 32/40 bit dram channel width */
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
+ } else {
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
+ if (dsch.chan_width == 0)
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
+ else
+ daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
+ }
+ }
+
+ daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
+ if (dmap4[pmiidx].row14 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
+ if (dmap4[pmiidx].row15 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
+ if (dmap4[pmiidx].row16 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
+ if (dmap4[pmiidx].row17 != 31)
+ daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
+
+ daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
+ daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
+ if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
+ daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
+
+ return 0;
+}
+
+static int check_channel(int ch)
+{
+ if (drp0[ch].dramtype != 0) {
+ pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
+ return 1;
+ } else if (drp0[ch].eccen == 0) {
+ pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
+ return 1;
+ }
+ return 0;
+}
+
+static int apl_check_ecc_active(void)
+{
+ int i, ret = 0;
+
+ /* Check dramtype and ECC mode for each present DIMM */
+ for (i = 0; i < APL_NUM_CHANNELS; i++)
+ if (chan_mask & BIT(i))
+ ret += check_channel(i);
+ return ret ? -EINVAL : 0;
+}
+
+#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
+
+static int check_unit(int ch)
+{
+ struct d_cr_drp *d = &drp[ch];
+
+ if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
+ pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
+ return 1;
+ }
+ return 0;
+}
+
+static int dnv_check_ecc_active(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < DNV_NUM_CHANNELS; i++)
+ ret += check_unit(i);
+ return ret ? -EINVAL : 0;
+}
+
+static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
+ struct dram_addr *daddr, char *msg)
+{
+ u64 pmiaddr;
+ u32 pmiidx;
+ int ret;
+
+ ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
+ if (ret)
+ return ret;
+
+ pmiaddr >>= ops->pmiaddr_shift;
+ /* pmi channel idx to dimm channel idx */
+ pmiidx >>= ops->pmiidx_shift;
+ daddr->chan = pmiidx;
+
+ ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
+ if (ret)
+ return ret;
+
+ edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
+ addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
+
+ return 0;
+}
+
+static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
+ struct dram_addr *daddr)
+{
+ enum hw_event_mc_err_type tp_event;
+ char *optype, msg[PND2_MSG_SIZE];
+ bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
+ bool overflow = m->status & MCI_STATUS_OVER;
+ bool uc_err = m->status & MCI_STATUS_UC;
+ bool recov = m->status & MCI_STATUS_S;
+ u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+ u32 mscod = GET_BITFIELD(m->status, 16, 31);
+ u32 errcode = GET_BITFIELD(m->status, 0, 15);
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ int rc;
+
+ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
+ HW_EVENT_ERR_CORRECTED;
+
+ /*
+ * According to Table 15-9 of the Intel Architecture spec vol 3A,
+ * memory errors should fit in this mask:
+ * 000f 0000 1mmm cccc (binary)
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ * If the mask doesn't match, report an error to the parsing logic
+ */
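+ /*
+ * e.g. the 0x009f status used by the debugfs test hook below passes
+ * this check: (0x009f & 0xef80) == 0x0080, i.e. a memory read error
+ * with the channel field set to "unknown".
+ */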
+ if (!((errcode & 0xef80) == 0x80)) {
+ optype = "Can't parse: it is not a mem";
+ } else {
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+ }
+
+ /* Only decode errors with a valid address (ADDRV) */
+ if (!(m->status & MCI_STATUS_ADDRV))
+ return;
+
+ rc = get_memory_error_data(mci, m->addr, daddr, msg);
+ if (rc)
+ goto address_error;
+
+ snprintf(msg, sizeof(msg),
+ "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
+ overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
+ errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
+
+ edac_dbg(0, "%s\n", msg);
+
+ /* Call the helper to output message */
+ edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
+ m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
+
+ return;
+
+address_error:
+ edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
+}
+
+static void apl_get_dimm_config(struct mem_ctl_info *mci)
+{
+ struct pnd2_pvt *pvt = mci->pvt_info;
+ struct dimm_info *dimm;
+ struct d_cr_drp0 *d;
+ u64 capacity;
+ int i, g;
+
+ for (i = 0; i < APL_NUM_CHANNELS; i++) {
+ if (!(chan_mask & BIT(i)))
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
+ if (!dimm) {
+ edac_dbg(0, "No allocated DIMM for channel %d\n", i);
+ continue;
+ }
+
+ d = &drp0[i];
+ for (g = 0; g < ARRAY_SIZE(dimms); g++)
+ if (dimms[g].addrdec == d->addrdec &&
+ dimms[g].dden == d->dden &&
+ dimms[g].dwid == d->dwid)
+ break;
+
+ if (g == ARRAY_SIZE(dimms)) {
+ edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
+ continue;
+ }
+
+ pvt->dimm_geom[i] = g;
+ capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
+ (1ul << dimms[g].colbits);
+ edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
+ dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
+ dimm->grain = 32;
+ dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
+ dimm->mtype = MEM_DDR3;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
+ }
+}
+
+static const int dnv_dtypes[] = {
+ DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
+};
+
+static void dnv_get_dimm_config(struct mem_ctl_info *mci)
+{
+ int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
+ struct dimm_info *dimm;
+ struct d_cr_drp *d;
+ u64 capacity;
+
+ if (dsch.ddr4en) {
+ memtype = MEM_DDR4;
+ banks = 16;
+ colbits = 10;
+ } else {
+ memtype = MEM_DDR3;
+ banks = 8;
+ }
+
+ for (i = 0; i < DNV_NUM_CHANNELS; i++) {
+ if (dmap4[i].row14 == 31)
+ rowbits = 14;
+ else if (dmap4[i].row15 == 31)
+ rowbits = 15;
+ else if (dmap4[i].row16 == 31)
+ rowbits = 16;
+ else if (dmap4[i].row17 == 31)
+ rowbits = 17;
+ else
+ rowbits = 18;
+
+ if (memtype == MEM_DDR3) {
+ if (dmap1[i].ca11 != 0x3f)
+ colbits = 12;
+ else
+ colbits = 10;
+ }
+
+ d = &drp[i];
+ /* DIMM0 is present if rank0 and/or rank1 is enabled */
+ ranks_of_dimm[0] = d->rken0 + d->rken1;
+ /* DIMM1 is present if rank2 and/or rank3 is enabled */
+ ranks_of_dimm[1] = d->rken2 + d->rken3;
+
+ for (j = 0; j < DNV_MAX_DIMMS; j++) {
+ if (!ranks_of_dimm[j])
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
+ if (!dimm) {
+ edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
+ continue;
+ }
+
+ capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
+ edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
+ dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
+ dimm->grain = 32;
+ dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
+ dimm->mtype = memtype;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
+ }
+ }
+}
+
+static int pnd2_register_mci(struct mem_ctl_info **ppmci)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct pnd2_pvt *pvt;
+ int rc;
+
+ rc = ops->check_ecc();
+ if (rc < 0)
+ return rc;
+
+ /* Allocate a new MC control structure */
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = ops->channels;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = ops->dimms_per_channel;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (!mci)
+ return -ENOMEM;
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+
+ mci->mod_name = "pnd2_edac.c";
+ mci->dev_name = ops->name;
+ mci->ctl_name = "Pondicherry2";
+
+ /* Get dimm basic config and the memory layout */
+ ops->get_dimm_config(mci);
+
+ if (edac_mc_add_mc(mci)) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return -EINVAL;
+ }
+
+ *ppmci = mci;
+
+ return 0;
+}
+
+static void pnd2_unregister_mci(struct mem_ctl_info *mci)
+{
+ if (unlikely(!mci || !mci->pvt_info)) {
+ pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
+ return;
+ }
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(NULL);
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+ edac_mc_free(mci);
+}
+
+/*
+ * Callback function registered with core kernel mce code.
+ * Called once for each logged error.
+ */
+static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct mem_ctl_info *mci;
+ struct dram_addr daddr;
+ char *type;
+
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
+ mci = pnd2_mci;
+ if (!mci)
+ return NOTIFY_DONE;
+
+ /*
+ * Just let mcelog handle it if the error is
+ * outside the memory controller. A memory error
+ * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
+ * Bit 12 has a special meaning.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
+ pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
+ mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
+ pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
+ pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
+ pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
+ pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
+ mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
+
+ pnd2_mce_output_error(mci, mce, &daddr);
+
+ /* Advise mcelog that the error was handled */
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block pnd2_mce_dec = {
+ .notifier_call = pnd2_mce_check_error,
+};
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
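+/*
+ * Example usage (path assumes the standard debugfs mount point and the
+ * "edac" debugfs root used by edac_debugfs_create_dir()):
+ * echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
+ * cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
+ */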
+static u64 pnd2_fake_addr;
+#define PND2_BLOB_SIZE 1024
+static char pnd2_result[PND2_BLOB_SIZE];
+static struct dentry *pnd2_test;
+static struct debugfs_blob_wrapper pnd2_blob = {
+ .data = pnd2_result,
+ .size = 0
+};
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct dram_addr daddr;
+ struct mce m;
+
+ *(u64 *)data = val;
+ m.mcgstatus = 0;
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x9f;
+ m.addr = val;
+ pnd2_mce_output_error(pnd2_mci, &m, &daddr);
+ snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
+ "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
+ m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
+ pnd2_blob.size = strlen(pnd2_blob.data);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_pnd2_debug(void)
+{
+ pnd2_test = edac_debugfs_create_dir("pnd2_test");
+ edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
+ &pnd2_fake_addr, &fops_u64_wo);
+ debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
+}
+
+static void teardown_pnd2_debug(void)
+{
+ debugfs_remove_recursive(pnd2_test);
+}
+#else
+static void setup_pnd2_debug(void) {}
+static void teardown_pnd2_debug(void) {}
+#endif /* CONFIG_EDAC_DEBUG */
+
+
+static int pnd2_probe(void)
+{
+ int rc;
+
+ edac_dbg(2, "\n");
+ rc = get_registers();
+ if (rc)
+ return rc;
+
+ return pnd2_register_mci(&pnd2_mci);
+}
+
+static void pnd2_remove(void)
+{
+ edac_dbg(0, "\n");
+ pnd2_unregister_mci(pnd2_mci);
+}
+
+static struct dunit_ops apl_ops = {
+ .name = "pnd2/apl",
+ .type = APL,
+ .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
+ .pmiidx_shift = 0,
+ .channels = APL_NUM_CHANNELS,
+ .dimms_per_channel = 1,
+ .rd_reg = apl_rd_reg,
+ .get_registers = apl_get_registers,
+ .check_ecc = apl_check_ecc_active,
+ .mk_region = apl_mk_region,
+ .get_dimm_config = apl_get_dimm_config,
+ .pmi2mem = apl_pmi2mem,
+};
+
+static struct dunit_ops dnv_ops = {
+ .name = "pnd2/dnv",
+ .type = DNV,
+ .pmiaddr_shift = 0,
+ .pmiidx_shift = 1,
+ .channels = DNV_NUM_CHANNELS,
+ .dimms_per_channel = 2,
+ .rd_reg = dnv_rd_reg,
+ .get_registers = dnv_get_registers,
+ .check_ecc = dnv_check_ecc_active,
+ .mk_region = dnv_mk_region,
+ .get_dimm_config = dnv_get_dimm_config,
+ .pmi2mem = dnv_pmi2mem,
+};
+
+static const struct x86_cpu_id pnd2_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
+
+static int __init pnd2_init(void)
+{
+ const struct x86_cpu_id *id;
+ int rc;
+
+ edac_dbg(2, "\n");
+
+ id = x86_match_cpu(pnd2_cpuids);
+ if (!id)
+ return -ENODEV;
+
+ ops = (struct dunit_ops *)id->driver_data;
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ rc = pnd2_probe();
+ if (rc < 0) {
+ pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
+ return rc;
+ }
+
+ if (!pnd2_mci)
+ return -ENODEV;
+
+ mce_register_decode_chain(&pnd2_mce_dec);
+ setup_pnd2_debug();
+
+ return 0;
+}
+
+static void __exit pnd2_exit(void)
+{
+ edac_dbg(2, "\n");
+ teardown_pnd2_debug();
+ mce_unregister_decode_chain(&pnd2_mce_dec);
+ pnd2_remove();
+}
+
+module_init(pnd2_init);
+module_exit(pnd2_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h
new file mode 100644
index 0000000..61b6e79
--- /dev/null
+++ b/drivers/edac/pnd2_edac.h
@@ -0,0 +1,301 @@
+/*
+ * Register bitfield descriptions for Pondicherry2 memory controller.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _PND2_REGS_H
+#define _PND2_REGS_H
+
+struct b_cr_touud_lo_pci {
+ u32 lock : 1;
+ u32 reserved_1 : 19;
+ u32 touud : 12;
+};
+
+#define b_cr_touud_lo_pci_port 0x4c
+#define b_cr_touud_lo_pci_offset 0xa8
+#define b_cr_touud_lo_pci_r_opcode 0x04
+
+struct b_cr_touud_hi_pci {
+ u32 touud : 7;
+ u32 reserved_0 : 25;
+};
+
+#define b_cr_touud_hi_pci_port 0x4c
+#define b_cr_touud_hi_pci_offset 0xac
+#define b_cr_touud_hi_pci_r_opcode 0x04
+
+struct b_cr_tolud_pci {
+ u32 lock : 1;
+ u32 reserved_0 : 19;
+ u32 tolud : 12;
+};
+
+#define b_cr_tolud_pci_port 0x4c
+#define b_cr_tolud_pci_offset 0xbc
+#define b_cr_tolud_pci_r_opcode 0x04
+
+struct b_cr_mchbar_lo_pci {
+ u32 enable : 1;
+ u32 pad_3_1 : 3;
+ u32 pad_14_4: 11;
+ u32 base: 17;
+};
+
+struct b_cr_mchbar_hi_pci {
+ u32 base : 7;
+ u32 pad_31_7 : 25;
+};
+
+/* Symmetric region */
+struct b_cr_slice_channel_hash {
+ u64 slice_1_disabled : 1;
+ u64 hvm_mode : 1;
+ u64 interleave_mode : 2;
+ u64 slice_0_mem_disabled : 1;
+ u64 reserved_0 : 1;
+ u64 slice_hash_mask : 14;
+ u64 reserved_1 : 11;
+ u64 enable_pmi_dual_data_mode : 1;
+ u64 ch_1_disabled : 1;
+ u64 reserved_2 : 1;
+ u64 sym_slice0_channel_enabled : 2;
+ u64 sym_slice1_channel_enabled : 2;
+ u64 ch_hash_mask : 14;
+ u64 reserved_3 : 11;
+ u64 lock : 1;
+};
+
+#define b_cr_slice_channel_hash_port 0x4c
+#define b_cr_slice_channel_hash_offset 0x4c58
+#define b_cr_slice_channel_hash_r_opcode 0x06
+
+struct b_cr_mot_out_base_mchbar {
+ u32 reserved_0 : 14;
+ u32 mot_out_base : 15;
+ u32 reserved_1 : 1;
+ u32 tr_en : 1;
+ u32 imr_en : 1;
+};
+
+#define b_cr_mot_out_base_mchbar_port 0x4c
+#define b_cr_mot_out_base_mchbar_offset 0x6af0
+#define b_cr_mot_out_base_mchbar_r_opcode 0x00
+
+struct b_cr_mot_out_mask_mchbar {
+ u32 reserved_0 : 14;
+ u32 mot_out_mask : 15;
+ u32 reserved_1 : 1;
+ u32 ia_iwb_en : 1;
+ u32 gt_iwb_en : 1;
+};
+
+#define b_cr_mot_out_mask_mchbar_port 0x4c
+#define b_cr_mot_out_mask_mchbar_offset 0x6af4
+#define b_cr_mot_out_mask_mchbar_r_opcode 0x00
+
+struct b_cr_asym_mem_region0_mchbar {
+ u32 pad : 4;
+ u32 slice0_asym_base : 11;
+ u32 pad_18_15 : 4;
+ u32 slice0_asym_limit : 11;
+ u32 slice0_asym_channel_select : 1;
+ u32 slice0_asym_enable : 1;
+};
+
+#define b_cr_asym_mem_region0_mchbar_port 0x4c
+#define b_cr_asym_mem_region0_mchbar_offset 0x6e40
+#define b_cr_asym_mem_region0_mchbar_r_opcode 0x00
+
+struct b_cr_asym_mem_region1_mchbar {
+ u32 pad : 4;
+ u32 slice1_asym_base : 11;
+ u32 pad_18_15 : 4;
+ u32 slice1_asym_limit : 11;
+ u32 slice1_asym_channel_select : 1;
+ u32 slice1_asym_enable : 1;
+};
+
+#define b_cr_asym_mem_region1_mchbar_port 0x4c
+#define b_cr_asym_mem_region1_mchbar_offset 0x6e44
+#define b_cr_asym_mem_region1_mchbar_r_opcode 0x00
+
+/* Some bit fields moved in above two structs on Denverton */
+struct b_cr_asym_mem_region_denverton {
+ u32 pad : 4;
+ u32 slice_asym_base : 8;
+ u32 pad_19_12 : 8;
+ u32 slice_asym_limit : 8;
+ u32 pad_28_30 : 3;
+ u32 slice_asym_enable : 1;
+};
+
+struct b_cr_asym_2way_mem_region_mchbar {
+ u32 pad : 2;
+ u32 asym_2way_intlv_mode : 2;
+ u32 asym_2way_base : 11;
+ u32 pad_16_15 : 2;
+ u32 asym_2way_limit : 11;
+ u32 pad_30_28 : 3;
+ u32 asym_2way_interleave_enable : 1;
+};
+
+#define b_cr_asym_2way_mem_region_mchbar_port 0x4c
+#define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50
+#define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00
+
+/* Apollo Lake d-unit */
+
+struct d_cr_drp0 {
+ u32 rken0 : 1;
+ u32 rken1 : 1;
+ u32 ddmen : 1;
+ u32 rsvd3 : 1;
+ u32 dwid : 2;
+ u32 dden : 3;
+ u32 rsvd13_9 : 5;
+ u32 rsien : 1;
+ u32 bahen : 1;
+ u32 rsvd18_16 : 3;
+ u32 caswizzle : 2;
+ u32 eccen : 1;
+ u32 dramtype : 3;
+ u32 blmode : 3;
+ u32 addrdec : 2;
+ u32 dramdevice_pr : 2;
+};
+
+#define d_cr_drp0_offset 0x1400
+#define d_cr_drp0_r_opcode 0x00
+
+/* Denverton d-unit */
+
+struct d_cr_dsch {
+ u32 ch0en : 1;
+ u32 ch1en : 1;
+ u32 ddr4en : 1;
+ u32 coldwake : 1;
+ u32 newbypdis : 1;
+ u32 chan_width : 1;
+ u32 rsvd6_6 : 1;
+ u32 ooodis : 1;
+ u32 rsvd18_8 : 11;
+ u32 ic : 1;
+ u32 rsvd31_20 : 12;
+};
+
+#define d_cr_dsch_port 0x16
+#define d_cr_dsch_offset 0x0
+#define d_cr_dsch_r_opcode 0x0
+
+struct d_cr_ecc_ctrl {
+ u32 eccen : 1;
+ u32 rsvd31_1 : 31;
+};
+
+#define d_cr_ecc_ctrl_offset 0x180
+#define d_cr_ecc_ctrl_r_opcode 0x0
+
+struct d_cr_drp {
+ u32 rken0 : 1;
+ u32 rken1 : 1;
+ u32 rken2 : 1;
+ u32 rken3 : 1;
+ u32 dimmdwid0 : 2;
+ u32 dimmdden0 : 2;
+ u32 dimmdwid1 : 2;
+ u32 dimmdden1 : 2;
+ u32 rsvd15_12 : 4;
+ u32 dimmflip : 1;
+ u32 rsvd31_17 : 15;
+};
+
+#define d_cr_drp_offset 0x158
+#define d_cr_drp_r_opcode 0x0
+
+struct d_cr_dmap {
+ u32 ba0 : 5;
+ u32 ba1 : 5;
+ u32 bg0 : 5; /* if ddr3, ba2 = bg0 */
+ u32 bg1 : 5; /* if ddr3, ba3 = bg1 */
+ u32 rs0 : 5;
+ u32 rs1 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap_offset 0x174
+#define d_cr_dmap_r_opcode 0x0
+
+struct d_cr_dmap1 {
+ u32 ca11 : 6;
+ u32 bxor : 1;
+ u32 rsvd : 25;
+};
+
+#define d_cr_dmap1_offset 0xb4
+#define d_cr_dmap1_r_opcode 0x0
+
+struct d_cr_dmap2 {
+ u32 row0 : 5;
+ u32 row1 : 5;
+ u32 row2 : 5;
+ u32 row3 : 5;
+ u32 row4 : 5;
+ u32 row5 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap2_offset 0x148
+#define d_cr_dmap2_r_opcode 0x0
+
+struct d_cr_dmap3 {
+ u32 row6 : 5;
+ u32 row7 : 5;
+ u32 row8 : 5;
+ u32 row9 : 5;
+ u32 row10 : 5;
+ u32 row11 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap3_offset 0x14c
+#define d_cr_dmap3_r_opcode 0x0
+
+struct d_cr_dmap4 {
+ u32 row12 : 5;
+ u32 row13 : 5;
+ u32 row14 : 5;
+ u32 row15 : 5;
+ u32 row16 : 5;
+ u32 row17 : 5;
+ u32 rsvd : 2;
+};
+
+#define d_cr_dmap4_offset 0x150
+#define d_cr_dmap4_r_opcode 0x0
+
+struct d_cr_dmap5 {
+ u32 ca3 : 4;
+ u32 ca4 : 4;
+ u32 ca5 : 4;
+ u32 ca6 : 4;
+ u32 ca7 : 4;
+ u32 ca8 : 4;
+ u32 ca9 : 4;
+ u32 rsvd : 4;
+};
+
+#define d_cr_dmap5_offset 0x154
+#define d_cr_dmap5_r_opcode 0x0
+
+#endif /* _PND2_REGS_H */
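For orientation (not part of the patch): each register described in this header is identified by a port/offset/opcode triple and read as a raw 32- or 64-bit value over a sideband interface, then interpreted through the matching bitfield struct. A minimal sketch of that overlay, where read_sideband_reg() is a hypothetical stand-in for the driver's actual transport:

	/* Sketch only: overlay a raw register image on b_cr_tolud_pci.
	 * read_sideband_reg() is a placeholder, not a real kernel API.
	 */
	static u32 example_read_tolud(void)
	{
		struct b_cr_tolud_pci tolud;
		u32 raw;

		raw = read_sideband_reg(b_cr_tolud_pci_port,
					b_cr_tolud_pci_offset,
					b_cr_tolud_pci_r_opcode);
		memcpy(&tolud, &raw, sizeof(tolud));

		/* tolud.tolud holds bits 31:20 of top-of-low-usable-DRAM */
		return tolud.tolud << 20;
	}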
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 6c270d9..6692460 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev)
reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS);
if (!reg)
goto chk_iob_axi0;
- dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n");
+ dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n");
if (reg & IOBPA_RDATA_CORRUPT_MASK)
dev_err(edac_dev->dev, "IOB PA read data RAM error\n");
if (reg & IOBPA_M_RDATA_CORRUPT_MASK)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 96bbae5..fc09c76 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -44,7 +44,7 @@
config EXTCON_INTEL_INT3496
tristate "Intel INT3496 ACPI device extcon driver"
- depends on GPIOLIB && ACPI
+ depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
help
Say Y here to enable extcon support for USB OTG ports controlled by
an Intel INT3496 ACPI device.
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index a3131b0..9d17984 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = {
EXTCON_NONE,
};
+static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false };
+static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false };
+static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = {
+ { "id-gpios", &id_gpios, 1 },
+ { "vbus-gpios", &vbus_gpios, 1 },
+ { "mux-gpios", &mux_gpios, 1 },
+ { },
+};
+
static void int3496_do_usb_id(struct work_struct *work)
{
struct int3496_data *data =
@@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev)
struct int3496_data *data;
int ret;
+ ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
+ acpi_int3496_default_gpios);
+ if (ret) {
+ dev_err(dev, "can't add GPIO ACPI mapping\n");
+ return ret;
+ }
+
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev)
data->dev = dev;
INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
- data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
- INT3496_GPIO_USB_ID,
- GPIOD_IN);
+ data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN);
if (IS_ERR(data->gpio_usb_id)) {
ret = PTR_ERR(data->gpio_usb_id);
dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
return ret;
+ } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) {
+ dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n");
+ gpiod_direction_input(data->gpio_usb_id);
}
data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
- if (data->usb_id_irq <= 0) {
+ if (data->usb_id_irq < 0) {
dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
- return -EINVAL;
+ return data->usb_id_irq;
}
- data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
- INT3496_GPIO_VBUS_EN,
- GPIOD_ASIS);
+ data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS);
if (IS_ERR(data->gpio_vbus_en))
dev_info(dev, "can't request VBUS EN GPIO\n");
- data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
- INT3496_GPIO_USB_MUX,
- GPIOD_ASIS);
+ data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);
if (IS_ERR(data->gpio_usb_mux))
dev_info(dev, "can't request USB MUX GPIO\n");
@@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, data->usb_id_irq, data);
cancel_delayed_work_sync(&data->work);
+ acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
return 0;
}
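For context: the acpi_gpio_mapping table registered above is what lets the probe path drop its index-based devm_gpiod_get_index() calls. Once acpi_dev_add_driver_gpios() has installed the table, gpiolib resolves the connection IDs by name against the ACPI-described pins. A condensed sketch of the resulting lookup pattern, taken from the hunks above with error handling elided:

	/* Sketch: name-based GPIO lookups after the mapping is registered. */
	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
					acpi_int3496_default_gpios);
	if (ret)
		return ret;

	data->gpio_usb_id  = devm_gpiod_get(dev, "id", GPIOD_IN);     /* "id-gpios"   */
	data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS); /* "vbus-gpios" */
	data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS);  /* "mux-gpios"  */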
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 9e1a138..16a8951 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev)
gpio->regmap = a10sr->regmap;
gpio->gp = altr_a10sr_gc;
-
+ gpio->gp.parent = pdev->dev.parent;
gpio->gp.of_node = pdev->dev.of_node;
ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 5bddbd5..3fe6a21 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- if (type == IRQ_TYPE_NONE)
+ if (type == IRQ_TYPE_NONE) {
+ irq_set_handler_locked(d, handle_bad_irq);
return 0;
- if (type == IRQ_TYPE_LEVEL_HIGH &&
- altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
+ }
+ if (type == altera_gc->interrupt_trigger) {
+ if (type == IRQ_TYPE_LEVEL_HIGH)
+ irq_set_handler_locked(d, handle_level_irq);
+ else
+ irq_set_handler_locked(d, handle_simple_irq);
return 0;
- if (type == IRQ_TYPE_EDGE_RISING &&
- altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
- return 0;
- if (type == IRQ_TYPE_EDGE_FALLING &&
- altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
- return 0;
- if (type == IRQ_TYPE_EDGE_BOTH &&
- altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
- return 0;
-
+ }
+ irq_set_handler_locked(d, handle_bad_irq);
return -EINVAL;
}
@@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-
static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
{
struct altera_gpio_chip *altera_gc;
@@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->interrupt_trigger = reg;
ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&pdev->dev, "could not add irqchip\n");
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index bdb6923..2a57d024 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
- int intcap, intf, i;
+ int intcap, intf, i, gpio, gpio_orig, intcap_mask;
unsigned int child_irq;
+ bool intf_set, intcap_changed, gpio_bit_changed,
+ defval_changed, gpio_set;
mutex_lock(&mcp->lock);
if (mcp_read(mcp, MCP_INTF, &intf) < 0) {
@@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
}
mcp->cache[MCP_INTCAP] = intcap;
+
+ /* This clears the interrupt (configurable on S18) */
+ if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
+ mutex_unlock(&mcp->lock);
+ return IRQ_HANDLED;
+ }
+ gpio_orig = mcp->cache[MCP_GPIO];
+ mcp->cache[MCP_GPIO] = gpio;
mutex_unlock(&mcp->lock);
+ if (mcp->cache[MCP_INTF] == 0) {
+ /* There is no interrupt pending */
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(mcp->chip.parent,
+ "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n",
+ intcap, intf, gpio_orig, gpio);
for (i = 0; i < mcp->chip.ngpio; i++) {
- if ((BIT(i) & mcp->cache[MCP_INTF]) &&
- ((BIT(i) & intcap & mcp->irq_rise) ||
- (mcp->irq_fall & ~intcap & BIT(i)) ||
- (BIT(i) & mcp->cache[MCP_INTCON]))) {
+ /* We must check all of the inputs on the chip,
+ * otherwise we may not notice a change on >=2 pins.
+ *
+ * On at least the mcp23s17, INTCAP is only updated
+ * one byte at a time(INTCAPA and INTCAPB are
+ * not written to at the same time - only on a per-bank
+ * basis).
+ *
+ * INTF only contains the single bit that caused the
+ * interrupt per-bank. On the mcp23s17, there is
+ * INTFA and INTFB. If two pins are changed on the A
+ * side at the same time, INTF will only have one bit
+ * set. If one pin on the A side and one pin on the B
+ * side are changed at the same time, INTF will have
+ * two bits set. Thus, INTF can't be the only check
+ * to see if the input has changed.
+ */
+
+ intf_set = BIT(i) & mcp->cache[MCP_INTF];
+ if (i < 8 && intf_set)
+ intcap_mask = 0x00FF;
+ else if (i >= 8 && intf_set)
+ intcap_mask = 0xFF00;
+ else
+ intcap_mask = 0x00;
+
+ intcap_changed = (intcap_mask &
+ (BIT(i) & mcp->cache[MCP_INTCAP])) !=
+ (intcap_mask & (BIT(i) & gpio_orig));
+ gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+ gpio_bit_changed = (BIT(i) & gpio_orig) !=
+ (BIT(i) & mcp->cache[MCP_GPIO]);
+ defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
+ ((BIT(i) & mcp->cache[MCP_GPIO]) !=
+ (BIT(i) & mcp->cache[MCP_DEFVAL]));
+
+ if (((gpio_bit_changed || intcap_changed) &&
+ (BIT(i) & mcp->irq_rise) && gpio_set) ||
+ ((gpio_bit_changed || intcap_changed) &&
+ (BIT(i) & mcp->irq_fall) && !gpio_set) ||
+ defval_changed) {
child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
handle_nested_irq(child_irq);
}
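A worked illustration of the bank masking above (not part of the patch), showing how intcap_mask confines the INTCAP comparison to the bank that raised INTF:

	/* Worked example (illustrative):
	 *   i = 3,  INTF bit 3 set  -> intf_set = true,  intcap_mask = 0x00FF (bank A)
	 *   i = 10, INTF bit 10 set -> intf_set = true,  intcap_mask = 0xFF00 (bank B)
	 *   INTF bit clear          -> intf_set = false, intcap_mask = 0x0000,
	 *                              so intcap_changed cannot fire for that pin and
	 *                              only gpio_bit_changed or defval_changed can.
	 */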
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 06dac72..d993386 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file,
struct seq_file *sfile;
struct gpio_desc *desc;
struct gpio_chip *gc;
- int status, val;
+ int val;
char buf;
sfile = file->private_data;
@@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file,
chip = priv->chip;
gc = &chip->gc;
- status = copy_from_user(&buf, usr_buf, 1);
- if (status)
- return status;
+ if (copy_from_user(&buf, usr_buf, 1))
+ return -EFAULT;
if (buf == '0')
val = 0;
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 40a8881..f1c6ec1 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -42,9 +42,7 @@ struct xgene_gpio {
struct gpio_chip chip;
void __iomem *base;
spinlock_t lock;
-#ifdef CONFIG_PM
u32 set_dr_val[XGENE_MAX_GPIO_BANKS];
-#endif
};
static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
return 0;
}
-#ifdef CONFIG_PM
-static int xgene_gpio_suspend(struct device *dev)
+static __maybe_unused int xgene_gpio_suspend(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev)
return 0;
}
-static int xgene_gpio_resume(struct device *dev)
+static __maybe_unused int xgene_gpio_resume(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev)
}
static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
-#define XGENE_GPIO_PM_OPS (&xgene_gpio_pm)
-#else
-#define XGENE_GPIO_PM_OPS NULL
-#endif
static int xgene_gpio_probe(struct platform_device *pdev)
{
@@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = {
.name = "xgene-gpio",
.of_match_table = xgene_gpio_of_match,
.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
- .pm = XGENE_GPIO_PM_OPS,
+ .pm = &xgene_gpio_pm,
},
.probe = xgene_gpio_probe,
};
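The xgene-gpio change above follows a common kernel pattern rather than anything driver-specific: mark the suspend/resume callbacks __maybe_unused and always point .pm at the SIMPLE_DEV_PM_OPS structure, so the compiler discards the dead code when CONFIG_PM_SLEEP is disabled and no #ifdef is needed. A generic sketch of the shape, with placeholder names:

	/* Sketch of the #ifdef-free PM pattern (names are placeholders). */
	static __maybe_unused int foo_suspend(struct device *dev) { return 0; }
	static __maybe_unused int foo_resume(struct device *dev)  { return 0; }

	static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			.pm = &foo_pm,
		},
	};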
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb5..8a08e81 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d2d0f60..99424cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
for (; i >= 0; i--)
drm_free_large(p->chunks[i].kdata);
kfree(p->chunks);
+ p->chunks = NULL;
+ p->nchunks = 0;
put_ctx:
amdgpu_ctx_put(p->ctx);
free_chunk:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4120b35..de0cf33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
@@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
/* clear wb memory */
- memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
+ memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
}
return 0;
@@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
use_bank = 0;
}
- *pos &= 0x3FFFF;
+ *pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
use_bank = 0;
}
- *pos &= 0x3FFFF;
+ *pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f7adbac..b76cd69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0, 0, 0}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index f55e45b..c5dec21 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->revision == 0xC7) ||
+ (adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ }
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 50bdb24..4a785d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ adev->pg_flags |=
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 8cf71f3..261b828 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ AMD_PG_STATE_GATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 08e6a71..294b536 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
clk_prepare_enable(hwdev->pxlclk);
- /* mclk needs to be set to the same or higher rate than pxlclk */
- clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+ /* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
hwdev->modeset(hwdev, &vm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 488aedf..9f55130 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 414aada..d5aec08 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -37,6 +37,8 @@
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE 0x010
#define MALIDP_LAYER_OFFSET 0x014
+#define MALIDP550_LS_ENABLE 0x01c
+#define MALIDP550_LS_R1_IN_SIZE 0x020
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
+ if (mp->layer->id == DE_SMART)
+ malidp_hw_write(mp->hwdev,
+ LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+ mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
- /* Skip the features which the SMART layer doesn't have */
- if (id == DE_SMART)
+ if (id == DE_SMART) {
+ /*
+ * Enable the first rectangle in the SMART layer to be
+ * able to use it as a drm plane.
+ */
+ malidp_hw_write(malidp->dev, 1,
+ plane->layer->base + MALIDP550_LS_ENABLE);
+ /* Skip the features which the SMART layer doesn't have. */
continue;
+ }
drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index aff6d4a..b816067 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -84,6 +84,7 @@
/* Stride register offsets relative to Lx_BASE */
#define MALIDP_DE_LG_STRIDE 0x18
#define MALIDP_DE_LV_STRIDE0 0x18
+#define MALIDP550_DE_LS_R1_STRIDE 0x28
/* macros to set values into registers */
#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f6d4d97..324a688 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
* to KMS, hence fail if different settings are requested.
*/
if (var->bits_per_pixel != fb->format->cpp[0] * 8 ||
- var->xres != fb->width || var->yres != fb->height ||
- var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
- DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
+ var->xres > fb->width || var->yres > fb->height ||
+ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+ DRM_DEBUG("fb requested width/height/bpp can't fit in current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 0fd6f7a..c0e8d33 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -68,6 +68,8 @@ struct decon_context {
unsigned long flags;
unsigned long out_type;
int first_win;
+ spinlock_t vblank_lock;
+ u32 frame_id;
};
static const uint32_t decon_formats[] = {
@@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
if (ctx->out_type & IFTYPE_I80)
val |= VIDINTCON0_FRAMEDONE;
else
- val |= VIDINTCON0_INTFRMEN;
+ val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
writel(val, ctx->addr + DECON_VIDINTCON0);
}
@@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
writel(0, ctx->addr + DECON_VIDINTCON0);
}
+/* return number of starts/ends of frame transmissions since reset */
+static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
+{
+ u32 frm, pfrm, status, cnt = 2;
+
+ /* To get a consistent result, repeat the read until the frame id is
+ * stable. Usually the loop executes once; only in the rare case that it
+ * runs right at a frame change is a second pass needed.
+ */
+ frm = readl(ctx->addr + DECON_CRFMID);
+ do {
+ status = readl(ctx->addr + DECON_VIDCON1);
+ pfrm = frm;
+ frm = readl(ctx->addr + DECON_CRFMID);
+ } while (frm != pfrm && --cnt);
+
+ /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case
+ * of RGB; this should be taken into account.
+ */
+ if (!frm)
+ return 0;
+
+ switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
+ case VIDCON1_VSTATUS_VS:
+ if (!(ctx->out_type & IFTYPE_I80))
+ --frm;
+ break;
+ case VIDCON1_VSTATUS_BP:
+ --frm;
+ break;
+ case VIDCON1_I80_ACTIVE:
+ case VIDCON1_VSTATUS_AC:
+ if (end)
+ --frm;
+ break;
+ default:
+ break;
+ }
+
+ return frm;
+}
+
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
return;
if (!(ctx->out_type & I80_HW_TRG)) {
- writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
- | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
+ writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
+ TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN,
ctx->addr + DECON_TRIGCON);
return;
}
@@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
+ unsigned long flags;
int i;
if (test_bit(BIT_SUSPENDED, &ctx->flags))
return;
+ spin_lock_irqsave(&ctx->vblank_lock, flags);
+
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
@@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);
+
+ ctx->frame_id = decon_get_frame_count(ctx, true);
+
+ exynos_crtc_handle_event(crtc);
+
+ spin_unlock_irqrestore(&ctx->vblank_lock, flags);
}
static void decon_swreset(struct decon_context *ctx)
{
unsigned int tries;
+ unsigned long flags;
writel(0, ctx->addr + DECON_VIDCON0);
for (tries = 2000; tries; --tries) {
@@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx)
WARN(tries == 0, "failed to software reset DECON\n");
+ spin_lock_irqsave(&ctx->vblank_lock, flags);
+ ctx->frame_id = 0;
+ spin_unlock_irqrestore(&ctx->vblank_lock, flags);
+
if (!(ctx->out_type & IFTYPE_HDMI))
return;
@@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = {
.unbind = decon_unbind,
};
+static void decon_handle_vblank(struct decon_context *ctx)
+{
+ u32 frm;
+
+ spin_lock(&ctx->vblank_lock);
+
+ frm = decon_get_frame_count(ctx, true);
+
+ if (frm != ctx->frame_id) {
+ /* handle only if incremented, take care of wrap-around */
+ if ((s32)(frm - ctx->frame_id) > 0)
+ drm_crtc_handle_vblank(&ctx->crtc->base);
+ ctx->frame_id = frm;
+ }
+
+ spin_unlock(&ctx->vblank_lock);
+}
+
static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = dev_id;
@@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
(VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F))
return IRQ_HANDLED;
}
- drm_crtc_handle_vblank(&ctx->crtc->base);
+ decon_handle_vblank(ctx);
}
out:
@@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
__set_bit(BIT_SUSPENDED, &ctx->flags);
ctx->dev = dev;
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
+ spin_lock_init(&ctx->vblank_lock);
if (ctx->out_type & IFTYPE_HDMI) {
ctx->first_win = 1;
@@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type |= IFTYPE_I80;
}
- if (ctx->out_type | I80_HW_TRG) {
+ if (ctx->out_type & I80_HW_TRG) {
ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,disp-sysreg");
if (IS_ERR(ctx->sysreg)) {
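The vblank path added for the 5433 DECON only calls drm_crtc_handle_vblank() when the hardware frame counter has genuinely advanced, and it compares counters through a signed difference so the 32-bit DECON_CRFMID wrap-around is handled. A minimal illustration of that comparison (values are made up):

	/* Illustrative only: signed-difference comparison across a wrap. */
	bool advanced;
	u32 frame_id = 0xfffffffe;	/* recorded at flush time */
	u32 frm = 0x00000001;		/* read in the interrupt handler */

	/* Treat the wrapped 32-bit difference as signed: +3 here. */
	advanced = (s32)(frm - frame_id) > 0;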
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index f9ab19e..4881180 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
+ exynos_crtc_handle_event(crtc);
}
static void decon_init(struct decon_context *ctx)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 5367b66..c65f450 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -85,24 +85,9 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct drm_pending_vblank_event *event;
- unsigned long flags;
if (exynos_crtc->ops->atomic_flush)
exynos_crtc->ops->atomic_flush(exynos_crtc);
-
- event = crtc->state->event;
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
-
}
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -114,6 +99,24 @@ static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.atomic_flush = exynos_crtc_atomic_flush,
};
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
+{
+ struct drm_crtc *crtc = &exynos_crtc->base;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ unsigned long flags;
+
+ if (event) {
+ crtc->state->event = NULL;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+
+}
+
static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6a581a8..abd5d6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
*/
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
+void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc);
+
#endif
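Taken together, the exynos changes in this pull move the vblank-event arming out of the generic atomic_flush helper and into exynos_crtc_handle_event(), which each CRTC backend (5433 DECON, Exynos7 DECON, FIMD, the mixer, and vidi via its .atomic_flush callback) now calls once its shadow registers have actually been committed. Schematically, for any backend:

	/* Sketch of the per-backend pattern after this series. */
	static void foo_atomic_flush(struct exynos_drm_crtc *crtc)
	{
		/* ... unprotect shadow windows / commit plane state ... */
		exynos_crtc_handle_event(crtc);
	}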
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 812e2ec..d7ef263 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -86,7 +86,7 @@
#define DSIM_SYNC_INFORM (1 << 27)
#define DSIM_EOT_DISABLE (1 << 28)
#define DSIM_MFLUSH_VS (1 << 29)
-/* This flag is valid only for exynos3250/3472/4415/5260/5430 */
+/* This flag is valid only for exynos3250/3472/5260/5430 */
#define DSIM_CLKLANE_STOP (1 << 30)
/* DSIM_ESCMODE */
@@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
.reg_values = reg_values,
};
-static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
- .reg_ofs = exynos_reg_ofs,
- .plltmr_reg = 0x58,
- .has_clklane_stop = 1,
- .num_clks = 2,
- .max_freq = 1000,
- .wait_for_reset = 1,
- .num_bits_resol = 11,
- .reg_values = reg_values,
-};
-
static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
@@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
.data = &exynos3_dsi_driver_data },
{ .compatible = "samsung,exynos4210-mipi-dsi",
.data = &exynos4_dsi_driver_data },
- { .compatible = "samsung,exynos4415-mipi-dsi",
- .data = &exynos4415_dsi_driver_data },
{ .compatible = "samsung,exynos5410-mipi-dsi",
.data = &exynos5_dsi_driver_data },
{ .compatible = "samsung,exynos5422-mipi-dsi",
@@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
bool first = !xfer->tx_done;
u32 reg;
- dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n",
+ dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n",
xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done);
if (length > DSI_TX_FIFO_SIZE)
@@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
spin_unlock_irqrestore(&dsi->transfer_lock, flags);
dev_dbg(dsi->dev,
- "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
+ "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
xfer->rx_done);
@@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
int te_gpio_irq;
dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ if (dsi->te_gpio == -ENOENT)
+ return 0;
+
if (!gpio_is_valid(dsi->te_gpio)) {
- dev_err(dsi->dev, "no te-gpios specified\n");
ret = dsi->te_gpio;
+ dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret);
goto out;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 95871577..5b18b5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev)
goto err_put_clk;
}
- DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+ DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a9fa444..3f04d72 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -71,10 +71,10 @@
#define TRIGCON 0x1A4
#define TRGMODE_ENABLE (1 << 0)
#define SWTRGCMD_ENABLE (1 << 1)
-/* Exynos3250, 3472, 4415, 5260 5410, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260, 5410, 5420 and 5422 only supported. */
#define HWTRGEN_ENABLE (1 << 3)
#define HWTRGMASK_ENABLE (1 << 4)
-/* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */
+/* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */
#define HWTRIGEN_PER_ENABLE (1 << 31)
/* display mode change control register except exynos4 */
@@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
.has_vtsel = 1,
};
-static struct fimd_driver_data exynos4415_fimd_driver_data = {
- .timing_base = 0x20000,
- .lcdblk_offset = 0x210,
- .lcdblk_vt_shift = 10,
- .lcdblk_bypass_shift = 1,
- .trg_type = I80_HW_TRG,
- .has_shadowcon = 1,
- .has_vidoutcon = 1,
- .has_vtsel = 1,
- .has_trigger_per_te = 1,
-};
-
static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x214,
@@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",
.data = &exynos4_fimd_driver_data },
- { .compatible = "samsung,exynos4415-fimd",
- .data = &exynos4415_fimd_driver_data },
{ .compatible = "samsung,exynos5250-fimd",
.data = &exynos5_fimd_driver_data },
{ .compatible = "samsung,exynos5420-fimd",
@@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
val |= VIDINTCON0_INT_FRAME;
val &= ~VIDINTCON0_FRAMESEL0_MASK;
- val |= VIDINTCON0_FRAMESEL0_VSYNC;
+ val |= VIDINTCON0_FRAMESEL0_FRONTPORCH;
val &= ~VIDINTCON0_FRAMESEL1_MASK;
val |= VIDINTCON0_FRAMESEL1_NONE;
}
@@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = 0; i < WINDOWS_NR; i++)
fimd_shadow_protect_win(ctx, i, false);
+
+ exynos_crtc_handle_event(crtc);
}
static void fimd_update_plane(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4c28f7f..55a1579 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
- DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
+ DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
return exynos_gem;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index bef5798..0506b2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
return ret;
}
- DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
+ DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv);
mutex_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 9c84ee7..3edda18 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
* e.g PAUSE state, queue buf, command control.
*/
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
+ DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
}
property->prop_id = ret;
- DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
+ DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
property->prop_id, property->cmd, ippdrv);
/* stored property information and ippdrv in private data */
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
{
int i;
- DRM_DEBUG_KMS("node[%p]\n", m_node);
+ DRM_DEBUG_KMS("node[%pK]\n", m_node);
if (!m_node) {
DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
m_node->buf_id = qbuf->buf_id;
INIT_LIST_HEAD(&m_node->list);
- DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
for_each_ipp_planar(i) {
@@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
mutex_lock(&c_node->event_lock);
list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
- DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
+ DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
/*
* qbuf == NULL condition means all event deletion.
@@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node
/* find memory node from memory list */
list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
+ DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
/* compare buffer id */
if (m_node->buf_id == qbuf->buf_id)
@@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
struct exynos_drm_ipp_ops *ops = NULL;
int ret = 0;
- DRM_DEBUG_KMS("node[%p]\n", m_node);
+ DRM_DEBUG_KMS("node[%pK]\n", m_node);
if (!m_node) {
DRM_ERROR("invalid queue node.\n");
@@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- DRM_DEBUG_KMS("m_node[%p]\n", m_node);
+ DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
@@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
}
ippdrv->prop_list.ipp_id = ret;
- DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
+ DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
count++, ippdrv, ret);
/* store parent device for node */
@@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
file_priv->ipp_dev = dev;
- DRM_DEBUG_KMS("done priv[%p]\n", dev);
+ DRM_DEBUG_KMS("done priv[%pK]\n", dev);
return 0;
}
@@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry_safe(c_node, tc_node,
&ippdrv->cmd_list, list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
+ DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
count++, ippdrv);
if (c_node->filp == file) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 6591e40..79282a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev)
goto err_ippdrv_register;
}
- DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
+ DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
platform_set_drvdata(pdev, rot);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 57fe514..5d9a62a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.enable_vblank = vidi_enable_vblank,
.disable_vblank = vidi_disable_vblank,
.update_plane = vidi_update_plane,
+ .atomic_flush = exynos_crtc_handle_event,
};
static void vidi_fake_vblank_timer(unsigned long arg)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 72143ac..25edb63 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
return;
mixer_vsync_set_update(mixer_ctx, true);
+ exynos_crtc_handle_event(crtc);
}
static void mixer_enable(struct exynos_drm_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b6caac..325618d 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
const char *item;
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
- gvt_err("Invalid vGPU creation params\n");
+ gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;
no_enough_resource:
- gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
- gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
- vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+ gvt_vgpu_err("fail to allocate resource %s\n", item);
+ gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+ BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ae6e2b..2b92cc8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
return ret;
}
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+ return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ struct intel_gvt *gvt = s->vgpu->gvt;
+ unsigned int data = cmd_val(s, index + 1);
+
+ if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+ gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+ offset, data);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
struct intel_gvt *gvt = vgpu->gvt;
if (offset + 4 > gvt->device_info.mmio_size) {
- gvt_err("%s access to (%x) outside of MMIO range\n",
+ gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
return -EINVAL;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
- gvt_err("vgpu%d: %s access to non-render register (%x)\n",
- s->vgpu->id, cmd, offset);
+ gvt_vgpu_err("%s access to non-render register (%x)\n",
+ cmd, offset);
return 0;
}
if (is_shadowed_mmio(offset)) {
- gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
- s->vgpu->id, offset);
+ gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
return 0;
}
+ if (is_force_nonpriv_mmio(offset) &&
+ force_nonpriv_reg_handler(s, offset, index))
+ return -EINVAL;
+
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
else if (post_sync == 1) {
/* check ggtt*/
- if ((cmd_val(s, 2) & (1 << 2))) {
+ if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
gma = cmd_val(s, 2) & GENMASK(31, 3);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
break;
default:
- gvt_err("unknown plane code %d\n", plane);
+ gvt_vgpu_err("unknown plane code %d\n", plane);
return -EINVAL;
}
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
struct mi_display_flip_command_info info;
+ struct intel_vgpu *vgpu = s->vgpu;
int ret;
int i;
int len = cmd_length(s);
ret = decode_mi_display_flip(s, &info);
if (ret) {
- gvt_err("fail to decode MI display flip command\n");
+ gvt_vgpu_err("fail to decode MI display flip command\n");
return ret;
}
ret = check_mi_display_flip(s, &info);
if (ret) {
- gvt_err("invalid MI display flip command\n");
+ gvt_vgpu_err("invalid MI display flip command\n");
return ret;
}
ret = update_plane_mmio_from_mi_display_flip(s, &info);
if (ret) {
- gvt_err("fail to update plane mmio\n");
+ gvt_vgpu_err("fail to update plane mmio\n");
return ret;
}
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
int ret;
if (op_size > max_surface_size) {
- gvt_err("command address audit fail name %s\n", s->info->name);
+ gvt_vgpu_err("command address audit fail name %s\n",
+ s->info->name);
return -EINVAL;
}
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
}
return 0;
err:
- gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+ gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
static inline int unexpected_cmd(struct parser_exec_state *s)
{
- gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
- s->vgpu->id, s->info->name);
+ struct intel_vgpu *vgpu = s->vgpu;
+
+ gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
return -EINVAL;
}
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
while (gma != end_gma) {
gpa = intel_vgpu_gma_to_gpa(mm, gma);
if (gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("invalid gma address: %lx\n", gma);
+ gvt_vgpu_err("invalid gma address: %lx\n", gma);
return -EFAULT;
}
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
uint32_t bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
+ struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
/* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
gma, gma + 4, &cmd);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_shadow_bb_entry *entry_obj;
+ struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
uint32_t bb_size;
void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) {
- gvt_err("failed to set shadow batch to CPU\n");
+ gvt_vgpu_err("failed to set shadow batch to CPU\n");
goto unmap_src;
}
@@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
gma, gma + bb_size,
dst);
if (ret) {
- gvt_err("fail to copy guest ring buffer\n");
+ gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
bool second_level;
int ret = 0;
+ struct intel_vgpu *vgpu = s->vgpu;
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
- gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+ gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
- gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+ gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL;
}
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (batch_buffer_needs_scan(s)) {
ret = perform_bb_shadow(s);
if (ret < 0)
- gvt_err("invalid shadow batch buffer\n");
+ gvt_vgpu_err("invalid shadow batch buffer\n");
} else {
/* emulate a batch buffer end to do return right */
ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
int ret = 0;
cycles_t t0, t1, t2;
struct parser_exec_state s_before_advance_custom;
+ struct intel_vgpu *vgpu = s->vgpu;
t0 = get_cycles();
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
- gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
}
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
- gvt_err("%s handler error\n", info->name);
+ gvt_vgpu_err("%s handler error\n", info->name);
return ret;
}
}
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
ret = cmd_advance_default(s);
if (ret) {
- gvt_err("%s IP advance error\n", info->name);
+ gvt_vgpu_err("%s IP advance error\n", info->name);
return ret;
}
}
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
unsigned long gma_head, gma_tail, gma_bottom;
int ret = 0;
+ struct intel_vgpu *vgpu = s->vgpu;
gma_head = rb_start + rb_head;
gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
if (s->buf_type == RING_BUFFER_INSTRUCTION) {
if (!(s->ip_gma >= rb_start) ||
!(s->ip_gma < gma_bottom)) {
- gvt_err("ip_gma %lx out of ring scope."
+ gvt_vgpu_err("ip_gma %lx out of ring scope."
"(base:0x%lx, bottom: 0x%lx)\n",
s->ip_gma, rb_start,
gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
return -EINVAL;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
- gvt_err("ip_gma %lx out of range."
+ gvt_vgpu_err("ip_gma %lx out of range."
"base 0x%lx head 0x%lx tail 0x%lx\n",
s->ip_gma, rb_start,
rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
}
ret = cmd_parser_exec(s);
if (ret) {
- gvt_err("cmd parser error\n");
+ gvt_vgpu_err("cmd parser error\n");
parser_exec_state_dump(s);
break;
}
@@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
- gvt_err("fail to copy guest ring buffer\n");
+ gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
@@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
- gvt_err("fail to copy guest ring buffer\n");
+ gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
@@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ret;
+ struct intel_vgpu *vgpu = workload->vgpu;
ret = shadow_workload_ring_buffer(workload);
if (ret) {
- gvt_err("fail to shadow workload ring_buffer\n");
+ gvt_vgpu_err("fail to shadow workload ring_buffer\n");
return ret;
}
ret = scan_workload(workload);
if (ret) {
- gvt_err("scan workload error\n");
+ gvt_vgpu_err("scan workload error\n");
return ret;
}
return 0;
@@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+ struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
@@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
/* get the va of the shadow batch buffer */
map = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(map)) {
- gvt_err("failed to vmap shadow indirect ctx\n");
+ gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
ret = PTR_ERR(map);
goto put_obj;
}
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret) {
- gvt_err("failed to set shadow indirect ctx to CPU\n");
+ gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
}
@@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
guest_gma, guest_gma + ctx_size,
map);
if (ret) {
- gvt_err("fail to copy guest indirect ctx\n");
+ gvt_vgpu_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
@@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
+ struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
ret = shadow_indirect_ctx(wa_ctx);
if (ret) {
- gvt_err("fail to shadow indirect ctx\n");
+ gvt_vgpu_err("fail to shadow indirect ctx\n");
return ret;
}
@@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
ret = scan_wa_ctx(wa_ctx);
if (ret) {
- gvt_err("scan wa ctx error\n");
+ gvt_vgpu_err("scan wa ctx error\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
index 68cba7b..b0cff4d 100644
--- a/drivers/gpu/drm/i915/gvt/debug.h
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -27,6 +27,14 @@
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
+#define gvt_vgpu_err(fmt, args...) \
+do { \
+ if (IS_ERR_OR_NULL(vgpu)) \
+ DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+ else \
+ DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+} while (0)
+
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index bda85df..f1648fe 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
unsigned char chr = 0;
if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
- gvt_err("Driver tries to read EDID without proper sequence!\n");
+ gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
return 0;
}
if (edid->current_edid_read >= EDID_SIZE) {
- gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+ gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
return 0;
}
if (!edid->edid_available) {
- gvt_err("Reading EDID but EDID is not available!\n");
+ gvt_vgpu_err("Reading EDID but EDID is not available!\n");
return 0;
}
@@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
chr = edid_data->edid_block[edid->current_edid_read];
edid->current_edid_read++;
} else {
- gvt_err("No EDID available during the reading?\n");
+ gvt_vgpu_err("No EDID available during the reading?\n");
}
return chr;
}
@@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
- gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+ gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
break;
}
/*
@@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
*/
} else {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
- gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
- vgpu->id);
+ gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
}
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 46eb9fd..f1f426a 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out(
struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx)
{
+ struct intel_vgpu *vgpu = execlist->vgpu;
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
@@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out(
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
- gvt_err("schedule out context is not running context,"
+ gvt_vgpu_err("schedule out context is not running context,"
"ctx id %x running ctx id %x\n",
ctx->context_id,
execlist->running_context->context_id);
@@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
status.udw = vgpu_vreg(vgpu, status_reg + 4);
if (status.execlist_queue_full) {
- gvt_err("virtual execlist slots are full\n");
+ gvt_vgpu_err("virtual execlist slots are full\n");
return NULL;
}
@@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
struct execlist_context_status_format status;
+ struct intel_vgpu *vgpu = execlist->vgpu;
gvt_dbg_el("emulate schedule-in\n");
if (!slot) {
- gvt_err("no available execlist slot\n");
+ gvt_vgpu_err("no available execlist slot\n");
return -EINVAL;
}
@@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
- gvt_err("Cannot pin\n");
return;
}
@@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
- gvt_err("Cannot pin indirect ctx obj\n");
return;
}
@@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
+ struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];
@@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
- gvt_err("Advanced Context mode(SVM) is not supported!\n");
+ gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
@@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
- gvt_err("fail to create mm object.\n");
+ gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
@@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+ gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}
@@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
continue;
if (!desc[i]->privilege_access) {
- gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
- vgpu->id);
+ gvt_vgpu_err("unexpected GGTT elsp submission\n");
return -EINVAL;
}
@@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
}
if (!valid_desc_bitmap) {
- gvt_err("vgpu%d: no valid desc in a elsp submission\n",
- vgpu->id);
+ gvt_vgpu_err("no valid desc in a elsp submission\n");
return -EINVAL;
}
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
test_bit(1, (void *)&valid_desc_bitmap)) {
- gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
- vgpu->id);
+ gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
return -EINVAL;
}
@@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
ret = submit_context(vgpu, ring_id, &valid_desc[i],
emulate_schedule_in);
if (ret) {
- gvt_err("vgpu%d: fail to schedule workload\n",
- vgpu->id);
+ gvt_vgpu_err("fail to schedule workload\n");
return ret;
}
emulate_schedule_in = false;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a5ff23..da73127 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
- gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
- vgpu->id, addr, size);
+ gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+ addr, size);
return false;
}
return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+ gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
return -ENXIO;
}
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, daddr)) {
- gvt_err("fail to map dma addr\n");
+ gvt_vgpu_err("fail to map dma addr\n");
return -EINVAL;
}
@@ -735,7 +735,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
if (reclaim_one_mm(vgpu->gvt))
goto retry;
- gvt_err("fail to allocate ppgtt shadow page\n");
+ gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
return ERR_PTR(-ENOMEM);
}
@@ -750,14 +750,14 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
*/
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
if (ret) {
- gvt_err("fail to initialize shadow page for spt\n");
+ gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err;
}
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
- gvt_err("fail to initialize guest page for spt\n");
+ gvt_vgpu_err("fail to initialize guest page for spt\n");
goto err;
}
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
if (p)
return shadow_page_to_ppgtt_spt(p);
- gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
- vgpu->id, mfn);
+ gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
return NULL;
}
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
}
s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
if (!s) {
- gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
- vgpu->id, ops->get_pfn(e));
+ gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+ ops->get_pfn(e));
return -ENXIO;
}
return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
+ struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_entry e;
unsigned long index;
int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_shadow_entry(spt, &e, index) {
if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
- gvt_err("GVT doesn't support pse bit for now\n");
+ gvt_vgpu_err("GVT doesn't support pse bit for now\n");
return -EINVAL;
}
ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_free_shadow_page(spt);
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
- spt->vgpu->id, spt, e.val64, e.type);
+ gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+ spt, e.val64, e.type);
return ret;
}
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
}
return s;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, s, we->val64, we->type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ s, we->val64, we->type);
return ERR_PTR(ret);
}
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
for_each_present_guest_entry(spt, &ge, i) {
if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
- gvt_err("GVT doesn't support pse bit now\n");
+ gvt_vgpu_err("GVT doesn't support pse bit now\n");
ret = -EINVAL;
goto fail;
}
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
}
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, spt, ge.val64, ge.type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ spt, ge.val64, ge.type);
return ret;
}
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
struct intel_vgpu_ppgtt_spt *s =
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
if (!s) {
- gvt_err("fail to find guest page\n");
+ gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
goto fail;
}
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
- vgpu->id, spt, e.val64, e.type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+ spt, e.val64, e.type);
return ret;
}
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
}
return 0;
fail:
- gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
- spt, we->val64, we->type);
+ gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
+ spt, we->val64, we->type);
return ret;
}
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
}
return 0;
fail:
- gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
- vgpu->id, spt, we->val64, we->type);
+ gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+ spt, we->val64, we->type);
return ret;
}
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
if (IS_ERR(spt)) {
- gvt_err("fail to populate guest root pointer\n");
+ gvt_vgpu_err("fail to populate guest root pointer\n");
ret = PTR_ERR(spt);
goto fail;
}
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
ret = gtt->mm_alloc_page_table(mm);
if (ret) {
- gvt_err("fail to allocate page table for mm\n");
+ gvt_vgpu_err("fail to allocate page table for mm\n");
goto fail;
}
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
}
return mm;
fail:
- gvt_err("fail to create mm\n");
+ gvt_vgpu_err("fail to create mm\n");
if (mm)
intel_gvt_mm_unreference(mm);
return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
mm->page_table_level, gma, gpa);
return gpa;
err:
- gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+ gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
return INTEL_GVT_INVALID_ADDR;
}
@@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (ops->test_present(&e)) {
ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) {
- gvt_err("vgpu%d: fail to translate guest gtt entry\n",
- vgpu->id);
+ gvt_vgpu_err("fail to translate guest gtt entry\n");
return ret;
}
} else {
@@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
- gvt_err("fail to allocate scratch page\n");
+ gvt_vgpu_err("fail to allocate scratch page\n");
return -ENOMEM;
}
daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
4096, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
- gvt_err("fail to dmamap scratch_pt\n");
+ gvt_vgpu_err("fail to dmamap scratch_pt\n");
__free_page(virt_to_page(scratch_pt));
return -ENOMEM;
}
@@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
NULL, 1, 0);
if (IS_ERR(ggtt_mm)) {
- gvt_err("fail to create mm for ggtt.\n");
+ gvt_vgpu_err("fail to create mm for ggtt.\n");
return PTR_ERR(ggtt_mm);
}
@@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
for (i = 0; i < preallocated_oos_pages; i++) {
oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
if (!oos_page) {
- gvt_err("fail to pre-allocate oos page\n");
ret = -ENOMEM;
goto fail;
}
@@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
- gvt_err("fail to create mm\n");
+ gvt_vgpu_err("fail to create mm\n");
return PTR_ERR(mm);
}
}
@@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
if (!mm) {
- gvt_err("fail to find ppgtt instance.\n");
+ gvt_vgpu_err("fail to find ppgtt instance.\n");
return -EINVAL;
}
intel_gvt_mm_unreference(mm);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2379192..6dfc48b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
- struct notifier_block shadow_ctx_notifier_block;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
+ struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 8e43395..eaff45d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
GVT_FAILSAFE_UNSUPPORTED_GUEST);
if (!vgpu->mmio.disable_warn_untrack) {
- gvt_err("vgpu%d: found oob fence register access\n",
- vgpu->id);
- gvt_err("vgpu%d: total fence %d, access fence %d\n",
- vgpu->id, vgpu_fence_sz(vgpu),
- fence_num);
+ gvt_vgpu_err("found oob fence register access\n");
+ gvt_vgpu_err("total fence %d, access fence %d\n",
+ vgpu_fence_sz(vgpu), fence_num);
}
memset(p_data, 0, bytes);
return -EINVAL;
@@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
break;
default:
/*should not hit here*/
- gvt_err("invalid forcewake offset 0x%x\n", offset);
+ gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
return -EINVAL;
}
} else {
@@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
} else {
- gvt_err("Invalid train pattern %d\n", train_pattern);
+ gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
return -EINVAL;
}
@@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupport registers %x\n", offset);
return -EINVAL;
}
@@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
u32 data;
if (!dpy_is_valid_port(port_index)) {
- gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+ gvt_vgpu_err("Unsupported DP port access!\n");
return 0;
}
@@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
if (i == num) {
if (num == SBI_REG_MAX) {
- gvt_err("vgpu%d: SBI caching meets maximum limits\n",
- vgpu->id);
+ gvt_vgpu_err("SBI caching meets maximum limits\n");
return;
}
display->sbi.number++;
@@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
break;
}
if (invalid_read)
- gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+ gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data);
vgpu->pv_notified = true;
return 0;
@@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
case 1: /* Remove this in guest driver. */
break;
default:
- gvt_err("Invalid PV notification %d\n", notification);
+ gvt_vgpu_err("Invalid PV notification %d\n", notification);
}
return ret;
}
@@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
break;
default:
- gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+ gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
@@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
- gvt_err("fail submit workload on ring %d\n", ring_id);
+ gvt_vgpu_err("fail submit workload on ring %d\n",
+ ring_id);
}
++execlist->elsp_dwords.index;
@@ -2988,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
write_vreg(vgpu, offset, p_data, bytes);
return 0;
}
+
+/**
+ * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO offset is in
+ * the force-nonpriv whitelist
+ *
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the register is in the force-nonpriv whitelist;
+ * False otherwise.
+ */
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+ unsigned int offset)
+{
+ return in_whitelist(offset);
+}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 84d8016..1ea3eb2 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
- struct intel_vgpu *vgpu;
+ struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
@@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) {
- gvt_err("failed to find type %s to create\n",
+ gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj));
ret = -EINVAL;
goto out;
@@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
- gvt_err("failed to create intel vgpu: %d\n", ret);
+ gvt_vgpu_err("failed to create intel vgpu: %d\n", ret);
goto out;
}
@@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
&vgpu->vdev.iommu_notifier);
if (ret != 0) {
- gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
+ gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
+ ret);
goto out;
}
@@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
&vgpu->vdev.group_notifier);
if (ret != 0) {
- gvt_err("vfio_register_notifier for group failed: %d\n", ret);
+ gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
+ ret);
goto undo_iommu;
}
@@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
if (index >= VFIO_PCI_NUM_REGIONS) {
- gvt_err("invalid index: %u\n", index);
+ gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX:
default:
- gvt_err("unsupported region: %u\n", index);
+ gvt_vgpu_err("unsupported region: %u\n", index);
}
return ret == 0 ? count : ret;
@@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
trigger = eventfd_ctx_fdget(fd);
if (IS_ERR(trigger)) {
- gvt_err("eventfd_ctx_fdget failed\n");
+ gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
vgpu->vdev.msi_trigger = trigger;
@@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size);
if (ret) {
- gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
+ gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
return -EINVAL;
}
if (data_size) {
@@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvm = vgpu->vdev.kvm;
if (!kvm || kvm->mm != current->mm) {
- gvt_err("KVM is required to use Intel vGPU\n");
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
}
@@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
+ struct intel_vgpu *vgpu = info->vgpu;
+
if (!info) {
- gvt_err("kvmgt_guest_info invalid\n");
+ gvt_vgpu_err("kvmgt_guest_info invalid\n");
return false;
}
@@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
unsigned long iova, pfn;
struct kvmgt_guest_info *info;
struct device *dev;
+ struct intel_vgpu *vgpu;
int rc;
if (!handle_valid(handle))
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
+ vgpu = info->vgpu;
iova = gvt_cache_find(info->vgpu, gfn);
if (iova != INTEL_GVT_INVALID_ADDR)
return iova;
@@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
dev = mdev_dev(info->vgpu->vdev.mdev);
rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
if (rc != 1) {
- gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
+ gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
+ gfn, rc);
return INTEL_GVT_INVALID_ADDR;
}
/* transfer to host iova for GFX to use DMA */
rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
if (rc) {
- gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+ gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
vfio_unpin_pages(dev, &gfn, 1);
return INTEL_GVT_INVALID_ADDR;
}
@@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
- int ret;
+ int idx, ret;
bool kthread = current->mm == NULL;
if (!handle_valid(handle))
@@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
if (kthread)
use_mm(kvm->mm);
+ idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len);
+ srcu_read_unlock(&kvm->srcu, idx);
if (kthread)
unuse_mm(kvm->mm);
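kvm_read_guest() and kvm_write_guest() walk the guest memslot array, which KVM protects with SRCU, so callers are expected to hold the SRCU read lock across the access; the hunk above adds exactly that around the copy. The pattern in isolation, assuming a valid struct kvm pointer is already in hand:

    int idx, ret;

    idx = srcu_read_lock(&kvm->srcu);       /* protects the memslot lookup */
    ret = write ? kvm_write_guest(kvm, gpa, buf, len)
                : kvm_read_guest(kvm, gpa, buf, len);
    srcu_read_unlock(&kvm->srcu, idx);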
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 60b698c..1ba3bdb 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes);
if (ret) {
- gvt_err("vgpu%d: guest page read error %d, "
+ gvt_vgpu_err("guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- vgpu->id, ret,
- gp->gfn, pa, *(u32 *)p_data, bytes);
+ ret, gp->gfn, pa, *(u32 *)p_data,
+ bytes);
}
mutex_unlock(&gvt->lock);
return ret;
@@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
if (!vgpu->mmio.disable_warn_untrack) {
- gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
- vgpu->id, offset, bytes, *(u32 *)p_data);
+ gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n",
+ offset, bytes, *(u32 *)p_data);
if (offset == 0x206c) {
- gvt_err("------------------------------------------\n");
- gvt_err("vgpu%d: likely triggers a gfx reset\n",
- vgpu->id);
- gvt_err("------------------------------------------\n");
+ gvt_vgpu_err("------------------------------------------\n");
+ gvt_vgpu_err("likely triggers a gfx reset\n");
+ gvt_vgpu_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
@@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock);
return 0;
err:
- gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
- vgpu->id, offset, bytes);
+ gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
+ offset, bytes);
mutex_unlock(&gvt->lock);
return ret;
}
@@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (gp) {
ret = gp->handler(gp, pa, p_data, bytes);
if (ret) {
- gvt_err("vgpu%d: guest page write error %d, "
- "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- vgpu->id, ret,
- gp->gfn, pa, *(u32 *)p_data, bytes);
+ gvt_err("guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, "
+ "var 0x%x, len %d\n",
+ ret, gp->gfn, pa,
+ *(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);
return ret;
@@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
/* all register bits are RO. */
if (ro_mask == ~(u64)0) {
- gvt_err("vgpu%d: try to write RO reg %x\n",
- vgpu->id, offset);
+ gvt_vgpu_err("try to write RO reg %x\n",
+ offset);
ret = 0;
goto out;
}
@@ -360,8 +360,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_unlock(&gvt->lock);
return 0;
err:
- gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
- vgpu->id, offset, bytes);
+ gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
+ bytes);
mutex_unlock(&gvt->lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 3bc620f..a3a0270 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+ unsigned int offset);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 5d1caf9..3117991 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
+ i * PAGE_SIZE);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to get MFN from VA\n");
+ gvt_vgpu_err("fail to get MFN from VA\n");
return -EINVAL;
}
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
vgpu_opregion(vgpu)->gfn[i],
mfn, 1, map);
if (ret) {
- gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+ gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
+ ret);
return ret;
}
}
@@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
if (!(swsci & SWSCI_SCI_SELECT)) {
- gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+ gvt_vgpu_err("requesting SMI service\n");
return 0;
}
/* ignore non 0->1 trasitions */
@@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
func = GVT_OPREGION_FUNC(*scic);
subfunc = GVT_OPREGION_SUBFUNC(*scic);
if (!querying_capabilities(*scic)) {
- gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+ gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n",
- vgpu->id,
opregion_func_name(func),
opregion_subfunc_name(subfunc));
/*
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 73f052a..95ee091 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
I915_WRITE_FW(reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
- gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg(vgpu, regs[ring_id]) = 0;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 06c9584..34b9acd 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,7 +101,7 @@ struct tbs_sched_data {
struct list_head runq_head;
};
-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
static void tbs_sched_func(struct work_struct *work)
{
@@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head);
- schedule_delayed_work(&sched_data->work, sched_data->period);
+ schedule_delayed_work(&sched_data->work, 0);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
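The time-slice change above is likely about HZ rounding: with the old expression, kernels configured with HZ below 1000 compute 1 * HZ / 1000 as 0 jiffies, so the scheduler work would be re-armed with no delay, whereas msecs_to_jiffies(1) rounds up to at least one jiffy. A hypothetical helper just to show the arithmetic, assuming CONFIG_HZ=250:

    #include <linux/jiffies.h>

    /* old_slice(): 1 * 250 / 1000 == 0 jiffies (integer division)
     * new_slice(): msecs_to_jiffies(1) rounds up to 1 jiffy (4 ms at HZ=250)
     */
    static unsigned long old_slice(void) { return 1 * HZ / 1000; }
    static unsigned long new_slice(void) { return msecs_to_jiffies(1); }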
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c9..c4353ed 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("Invalid guest context descriptor\n");
+ gvt_vgpu_err("Invalid guest context descriptor\n");
return -EINVAL;
}
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu, shadow_ctx_notifier_block);
- struct drm_i915_gem_request *req =
- (struct drm_i915_gem_request *)data;
- struct intel_gvt_workload_scheduler *scheduler =
- &vgpu->gvt->scheduler;
+ struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+ struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+ shadow_ctx_notifier_block[req->engine->id]);
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
@@ -175,7 +173,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -187,9 +187,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&dev_priv->drm.struct_mutex);
+ /* Pin the shadow context by GVT even though it will also be pinned
+ * when i915 allocates the request. GVT updates the guest context from
+ * the shadow context after the workload completes, and by that time
+ * i915 may already have unpinned the shadow context, leaving the
+ * shadow_ctx pages invalid. So GVT needs to pin it itself; after
+ * updating the guest context, GVT can unpin the shadow_ctx safely.
+ */
+ ret = engine->context_pin(engine, shadow_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow context\n");
+ workload->status = ret;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+ }
+
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
- gvt_err("fail to allocate gem request\n");
+ gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto out;
}
@@ -202,9 +217,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
- ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
- if (ret)
- goto out;
+ if ((workload->ring_id == RCS) &&
+ (workload->wa_ctx.indirect_ctx.size != 0)) {
+ ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret)
+ goto out;
+ }
ret = populate_shadow_context(workload);
if (ret)
@@ -227,6 +245,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (!IS_ERR_OR_NULL(rq))
i915_add_request_no_flush(rq);
+ else
+ engine->context_unpin(engine, shadow_ctx);
+
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
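The pin taken in dispatch_workload() is balanced in two places: by the explicit context_unpin() in the error path above when request allocation fails, and by the unpin added to complete_current_workload() further below once the guest context has been updated. A condensed sketch of the pairing, stitched together from the hunks above and below with error handling and unrelated steps omitted:

    /* dispatch path: take an extra pin for the lifetime of the workload */
    ret = engine->context_pin(engine, shadow_ctx);
    if (ret)
            return ret;

    rq = i915_gem_request_alloc(engine, shadow_ctx);
    if (IS_ERR(rq)) {
            engine->context_unpin(engine, shadow_ctx);  /* undo on failure */
            return PTR_ERR(rq);
    }
    i915_add_request_no_flush(rq);

    /* completion path: drop the pin only after the guest context has been
     * written back from the shadow context.
     */
    update_guest_context(workload);
    engine->context_unpin(engine, shadow_ctx);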
@@ -322,7 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_err("invalid guest context descriptor\n");
+ gvt_vgpu_err("invalid guest context descriptor\n");
return;
}
@@ -376,6 +397,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* For the workload w/o request, directly complete the workload.
*/
if (workload->req) {
+ struct drm_i915_private *dev_priv =
+ workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine =
+ dev_priv->engine[workload->ring_id];
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -388,6 +413,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ /* unpin shadow ctx as the shadow_ctx update is done */
+ engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -417,6 +446,7 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
+ struct intel_vgpu *vgpu = NULL;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -459,25 +489,14 @@ static int workload_thread(void *priv)
mutex_unlock(&gvt->lock);
if (ret) {
- gvt_err("fail to dispatch workload, skip\n");
+ vgpu = workload->vgpu;
+ gvt_vgpu_err("fail to dispatch workload, skip\n");
goto complete;
}
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
-retry:
- i915_wait_request(workload->req,
- 0, MAX_SCHEDULE_TIMEOUT);
- /* I915 has replay mechanism and a request will be replayed
- * if there is i915 reset. So the seqno will be updated anyway.
- * If the seqno is not updated yet after waiting, which means
- * the replay may still be in progress and we can wait again.
- */
- if (!i915_gem_request_completed(workload->req)) {
- gvt_dbg_sched("workload %p not completed, wait again\n",
- workload);
- goto retry;
- }
+ i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -513,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- int i;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id i;
gvt_dbg_core("clean workload scheduler\n");
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (scheduler->thread[i]) {
- kthread_stop(scheduler->thread[i]);
- scheduler->thread[i] = NULL;
- }
+ for_each_engine(engine, gvt->dev_priv, i) {
+ atomic_notifier_chain_unregister(
+ &engine->context_status_notifier,
+ &gvt->shadow_ctx_notifier_block[i]);
+ kthread_stop(scheduler->thread[i]);
}
}
@@ -529,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id i;
int ret;
- int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- /* check ring mask at init time */
- if (!HAS_ENGINE(gvt->dev_priv, i))
- continue;
-
+ for_each_engine(engine, gvt->dev_priv, i) {
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
+
+ gvt->shadow_ctx_notifier_block[i].notifier_call =
+ shadow_context_status_change;
+ atomic_notifier_chain_register(&engine->context_status_notifier,
+ &gvt->shadow_ctx_notifier_block[i]);
}
return 0;
err:
@@ -570,9 +592,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
- &vgpu->shadow_ctx_notifier_block);
-
i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
@@ -587,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
- vgpu->shadow_ctx_notifier_block.notifier_call =
- shadow_context_status_change;
-
- atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
- &vgpu->shadow_ctx_notifier_block);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e703556..1c75402 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_IRQ_ACTIVE:
case I915_PARAM_ALLOW_BATCHBUFFER:
case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD2:
value = !!dev_priv->engine[VCS2];
break;
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_GEN(dev_priv) >= 4;
- break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
break;
@@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- i915_gem_reset_finish(dev_priv);
+ i915_gem_reset(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
@@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
i915_queue_hangcheck(dev_priv);
wakeup:
+ i915_gem_reset_finish(dev_priv);
enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
return;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4b42d..1e53c31 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -293,6 +293,7 @@ enum plane_id {
PLANE_PRIMARY,
PLANE_SPRITE0,
PLANE_SPRITE1,
+ PLANE_SPRITE2,
PLANE_CURSOR,
I915_MAX_PLANES,
};
@@ -1324,7 +1325,7 @@ struct intel_gen6_power_mgmt {
unsigned boosts;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -2063,8 +2064,6 @@ struct drm_i915_private {
const struct intel_device_info info;
- int relative_constants_mode;
-
void __iomem *regs;
struct intel_uncore uncore;
@@ -3341,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
}
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6908123..67b1fc5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = -ENODEV;
+ if (obj->ops->pwrite)
+ ret = obj->ops->pwrite(obj, args);
+ if (ret != -ENODEV)
+ goto err;
+
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
@@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
+ obj->mm.pages = ERR_PTR(-EFAULT);
}
/* Try to discard unwanted pages */
@@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
__i915_gem_object_reset_page_iter(obj);
- obj->ops->put_pages(obj, pages);
+ if (!IS_ERR(pages))
+ obj->ops->put_pages(obj, pages);
+
unlock:
mutex_unlock(&obj->mm.lock);
}
@@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (err)
return err;
- if (unlikely(!obj->mm.pages)) {
+ if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
@@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
pinned = true;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(!obj->mm.pages)) {
+ if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
@@ -2563,6 +2572,75 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto out_unlock;
}
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg)
+{
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+ u64 remain, offset;
+ unsigned int pg;
+
+ /* Before we instantiate/pin the backing store for our use, we
+ * can prepopulate the shmemfs filp efficiently using a write into
+ * the pagecache. We avoid the penalty of instantiating all the
+ * pages, important if the user is just writing to a few and never
+ * uses the object on the GPU, and using a direct write into shmemfs
+ * allows it to avoid the cost of retrieving a page (either swapin
+ * or clearing-before-use) before it is overwritten.
+ */
+ if (READ_ONCE(obj->mm.pages))
+ return -ENODEV;
+
+ /* Before the pages are instantiated the object is treated as being
+ * in the CPU domain. The pages will be clflushed as required before
+ * use, and we can freely write into the pages directly. If userspace
+ * races pwrite with any other operation; corruption will ensue -
+ * that is userspace's prerogative!
+ */
+
+ remain = arg->size;
+ offset = arg->offset;
+ pg = offset_in_page(offset);
+
+ do {
+ unsigned int len, unwritten;
+ struct page *page;
+ void *data, *vaddr;
+ int err;
+
+ len = PAGE_SIZE - pg;
+ if (len > remain)
+ len = remain;
+
+ err = pagecache_write_begin(obj->base.filp, mapping,
+ offset, len, 0,
+ &page, &data);
+ if (err < 0)
+ return err;
+
+ vaddr = kmap(page);
+ unwritten = copy_from_user(vaddr + pg, user_data, len);
+ kunmap(page);
+
+ err = pagecache_write_end(obj->base.filp, mapping,
+ offset, len, len - unwritten,
+ page, data);
+ if (err < 0)
+ return err;
+
+ if (unwritten)
+ return -EFAULT;
+
+ remain -= len;
+ user_data += len;
+ offset += len;
+ pg = 0;
+ } while (remain);
+
+ return 0;
+}
+
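The object-ops pwrite hook uses -ENODEV as a "not handled here" sentinel: the ioctl only short-circuits when the hook returns anything else, and i915_gem_object_pwrite_gtt() itself returns -ENODEV once backing pages exist so the regular pwrite paths take over. The contract, condensed from the hunks above:

    ret = -ENODEV;
    if (obj->ops->pwrite)
            ret = obj->ops->pwrite(obj, args);  /* may itself return -ENODEV */
    if (ret != -ENODEV)
            goto err;                           /* handled, or a hard error */
    /* otherwise fall through to the GTT/shmem pwrite paths */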
static bool ban_context(const struct i915_gem_context *ctx)
{
return (i915_gem_context_is_bannable(ctx) &&
@@ -2641,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *request;
+ /* Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its engine->irq_tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the engine->irq_tasklet until the reset is over
+ * prevents the race.
+ */
tasklet_kill(&engine->irq_tasklet);
+ tasklet_disable(&engine->irq_tasklet);
if (engine_stalled(engine)) {
request = i915_gem_find_active_request(engine);
@@ -2756,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
engine->reset_hw(engine, request);
}
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2778,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
}
}
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ for_each_engine(engine, dev_priv, id)
+ tasklet_enable(&engine->irq_tasklet);
+}
+
static void nop_submit_request(struct drm_i915_gem_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
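Together with the i915_drv.c hunk earlier, the reset path now splits into three stages: prepare disables the per-engine irq_tasklet so no new ELSP writes race the reset, the old finish body becomes i915_gem_reset() and runs right after the hardware reset, and the new i915_gem_reset_finish() re-enables the tasklets from the wakeup path. A rough sketch of the intended ordering in i915_reset(); the real function does considerably more:

    i915_gem_reset_prepare(dev_priv);   /* kill + disable engine->irq_tasklet */

    /* ... perform the actual GPU reset ... */

    i915_gem_reset(dev_priv);           /* patch up requests for replay */

    /* ... re-initialise the hardware, resume submission ... */

    i915_gem_reset_finish(dev_priv);    /* re-enable the tasklets */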
@@ -3029,6 +3127,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
if (args->timeout_ns < 0)
args->timeout_ns = 0;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+ args->timeout_ns = 0;
}
i915_gem_object_put(obj);
@@ -3974,8 +4082,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
+
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
+
+ .pwrite = i915_gem_object_pwrite_gtt,
};
struct drm_i915_gem_object *
@@ -4583,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 17f90c6..e2d83b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
* present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b..e9c008f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,9 +160,6 @@ struct i915_gem_context {
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;
- /** status_notifier: list of callbacks for context-switch changes */
- struct atomic_notifier_head status_notifier;
-
/** guilty_count: How many times this context has caused a GPU hang. */
unsigned int guilty_count;
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c181b1b..3be2503 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* those as well to make room for our guard pages.
*/
if (check_color) {
- if (vma->node.start + vma->node.size == node->start) {
- if (vma->node.color == node->color)
+ if (node->start + node->size == target->start) {
+ if (node->color == target->color)
continue;
}
- if (vma->node.start == node->start + node->size) {
- if (vma->node.color == node->color)
+ if (node->start == target->start + target->size) {
+ if (node->color == target->color)
continue;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d02cfae..30e0675 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas)
{
- struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
- int instp_mode;
- u32 instp_mask;
int ret;
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (ret)
return ret;
- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- instp_mask = I915_EXEC_CONSTANTS_MASK;
- switch (instp_mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && params->engine->id != RCS) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (instp_mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev_priv)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev_priv)->gen > 5 &&
- instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev_priv)->gen >= 6)
- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+ DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
return -EINVAL;
}
- if (params->engine->id == RCS &&
- instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ring *ring = params->request->ring;
-
- ret = intel_ring_begin(params->request, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = instp_mode;
- }
-
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->request);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index bf90b07..76b80a0 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+ int (*pwrite)(struct drm_i915_gem_object *,
+ const struct drm_i915_gem_pwrite *);
+
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 401006b..d5d2b4c 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
return freed;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6ffef2..b6c886a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
+
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;
+
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
- }
-
+ dev_priv->rps.ei = now;
return events;
}
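The rewritten vlv_wa_c0_ei() keeps a single EI sample and derives both directions from it, comparing the combined render plus media C0 residency against the elapsed CZ time scaled by the up/down thresholds. A stripped-down helper expressing the same decision in percent form; busy_percent and the plain-percentage thresholds are assumptions for illustration, since the real code avoids the division by folding VLV_CZ_CLOCK_TO_MILLI_SEC * 100 into c0 instead:

    static u32 rps_events_from_busy(unsigned int busy_percent,
                                    unsigned int up_threshold,
                                    unsigned int down_threshold)
    {
            if (busy_percent > up_threshold)
                    return GEN6_PM_RP_UP_THRESHOLD;
            if (busy_percent < down_threshold)
                    return GEN6_PM_RP_DOWN_THRESHOLD;
            return 0;
    }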
@@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
+ /* Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block registers
+ * outside of the power domain. We defer setting up the display irqs
+ * in this case to the runtime pm.
+ */
+ dev_priv->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display_irqs_enabled = false;
+
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e..df20e9b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -512,10 +512,36 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return ret;
}
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
- unsigned int bound = vma->flags;
+ const unsigned int bound = vma->flags;
int ret;
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if ((bound & I915_VMA_BIND_MASK) == 0) {
ret = i915_vma_insert(vma, size, alignment, flags);
if (ret)
- goto err;
+ goto err_unpin;
}
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
- goto err;
+ goto err_remove;
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
-err:
+err_remove:
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ GEM_BUG_ON(vma->pages);
+ i915_vma_remove(vma);
+ }
+err_unpin:
__i915_vma_unpin(vma);
return ret;
}
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
if (vma->pages != obj->mm.pages) {
GEM_BUG_ON(!vma->pages);
sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->pages = NULL;
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_link,
- &to_i915(obj->base.dev)->mm.unbound_list);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ i915_vma_remove(vma);
destroy:
if (unlikely(i915_vma_is_closed(vma)))
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 0085bc7..de219b7 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -35,7 +35,6 @@
*/
#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0134167..ed1f4f2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
crtc->base.mode = crtc->base.state->mode;
- DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
- old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
@@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;
- DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
if (crtc->config->pch_pfit.enabled) {
int id;
- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
- DRM_ERROR("Requesting pfit without getting a scaler first\n");
+ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
- }
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
- DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
}
}
@@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
} while (progress);
}
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+ struct intel_atomic_state *state, *next;
+ struct llist_node *freed;
+
+ freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ llist_for_each_entry_safe(state, next, freed, freed)
+ drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+ intel_atomic_helper_free_state(dev_priv);
+}
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* can happen also when the device is completely off.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+ intel_atomic_helper_free_state(dev_priv);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
+ if (!modeset &&
+ (intel_cstate->base.color_mgmt_changed ||
+ intel_cstate->update_pipe)) {
+ intel_color_set_csc(crtc->state);
+ intel_color_load_luts(crtc->state);
+ }
+
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
goto out;
- if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
- intel_color_set_csc(crtc->state);
- intel_color_load_luts(crtc->state);
- }
-
if (intel_cstate->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
@@ -16599,18 +16611,6 @@ static void sanitize_watermarks(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
}
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), atomic_helper.free_work);
- struct intel_atomic_state *state, *next;
- struct llist_node *freed;
-
- freed = llist_del_all(&dev_priv->atomic_helper.free_list);
- llist_for_each_entry_safe(state, next, freed, freed)
- drm_atomic_state_put(&state->base);
-}
-
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &intel_mode_funcs;
INIT_WORK(&dev_priv->atomic_helper.free_work,
- intel_atomic_helper_free_state);
+ intel_atomic_helper_free_state_worker);
intel_init_quirks(dev);
@@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev)
}
}
- intel_update_czclk(dev_priv);
- intel_update_cdclk(dev_priv);
- dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
intel_shared_dpll_init(dev);
+ intel_update_czclk(dev_priv);
+ intel_modeset_init_hw(dev);
+
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev_priv);
@@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev_priv);
- intel_modeset_init_hw(dev);
-
intel_setup_overlay(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 371acf10..ab1be5c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
dev_priv->engine[id] = engine;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 1b8ba2e..2d449fb 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, mask;
+ unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
- int pass = 0;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
mask = BIT(count) - 1;
conn_configured = 0;
retry:
+ conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -387,7 +387,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
if (conn_configured & BIT(i))
continue;
- if (pass == 0 && !connector->has_tile)
+ if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -498,10 +498,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask) {
- pass++;
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
- }
/*
* If the BIOS didn't enable everything it could, fall back to have the
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index d23c0fc..8c04eca 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
goto bail;
}
+ if (!i915.enable_execlists) {
+ DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
+ goto bail;
+ }
+
/*
* We're not in host or fail to find a MPT module, disable GVT-g
*/
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ebae2bd..24b2fa5 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->base.crtc->dev);
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int i;
- if (HAS_GMCH_DISPLAY(to_i915(dev)))
+ if (HAS_GMCH_DISPLAY(dev_priv))
return false;
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
- return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
+ if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+ return false;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ const struct drm_display_info *info = &connector->display_info;
+
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
+
+ if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
+ return false;
+ }
+
+ return true;
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
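
For context on the connector loop added above in hdmi_12bpc_possible(): every connector driven by the CRTC must advertise 36-bit deep colour in its EDID-derived capability mask before 12bpc is allowed. A minimal sketch of that all-or-nothing check, with a made-up struct and an illustrative flag value:

    /* Minimal sketch; only the idea of the per-sink deep-colour test is
     * taken from the hunk, the types and flag value are invented. */
    #include <stdbool.h>
    #include <stdio.h>

    #define EDID_HDMI_DC_36 0x01    /* illustrative flag value */

    struct connector_info {
        unsigned int edid_hdmi_dc_modes;
    };

    static bool all_sinks_support_12bpc(const struct connector_info *conns, int n)
    {
        for (int i = 0; i < n; i++)
            if (!(conns[i].edid_hdmi_dc_modes & EDID_HDMI_DC_36))
                return false;
        return true;
    }

    int main(void)
    {
        struct connector_info conns[] = { { EDID_HDMI_DC_36 }, { 0 } };

        /* prints 0: the second sink does not advertise 36-bit deep colour */
        printf("%d\n", all_sinks_support_12bpc(conns, 2));
        return 0;
    }
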
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b62e3f8..54208be 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
}
- if (dev_priv->display.hpd_irq_setup)
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
- if (storm_detected)
+ if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
}
static void i915_hpd_poll_init_work(struct work_struct *work)
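
The rewritten intel_hpd_init() above uses a check-then-lock-then-recheck pattern: the flag is tested outside the lock so platforms that never disable display irqs skip the locking entirely, and re-tested under the lock because vlv/chv may have cleared it meanwhile. A userspace analogue, with a pthread mutex standing in for the irq spinlock:

    /* Minimal sketch, assuming nothing about the real locking rules beyond
     * what the hunk shows. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool display_irqs_enabled = true;

    static void hpd_irq_setup(void)
    {
        printf("programming hotplug registers\n");
    }

    static void hpd_init(void)
    {
        /* cheap test outside the lock ... */
        if (display_irqs_enabled) {
            pthread_mutex_lock(&irq_lock);
            /* ... re-tested under the lock, since the runtime-pm path
             * can clear it in the meantime on vlv/chv */
            if (display_irqs_enabled)
                hpd_irq_setup();
            pthread_mutex_unlock(&irq_lock);
        }
    }

    int main(void)
    {
        hpd_init();
        return 0;
    }
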
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ebf8023..471af3b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
return;
- atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
}
static void
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d..6a29784 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
+ /* When byt can survive dynamic sw freq adjustments without a
+ * system hang, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
u32 mask = 0;
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
@@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
@@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
* @timeout_base_ms: timeout for polling with preemption enabled
*
* Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
* The request is acknowledged once the PCODE reply dword equals @reply after
* applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
* preemption disabled.
*
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
* worst case) _and_ PCODE was busy for some reason even after a
* (queued) request and @timeout_base_ms delay. As a workaround retry
* the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
* account for interrupts that could reduce the number of these
- * requests.
+ * requests, and for any quirks of the PCODE firmware that delays
+ * the request completion.
*/
DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
- ret = wait_for_atomic(COND, 10);
+ ret = wait_for_atomic(COND, 50);
preempt_enable();
out:
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c2b8d..13dccb1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -403,6 +403,9 @@ struct intel_engine_cs {
*/
struct i915_gem_context *legacy_active_context;
+ /* context_status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head context_status_notifier;
+
struct intel_engine_hangcheck hangcheck;
bool needs_cmd_parser;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9ef5468..9481ca9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler;
- DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
- plane_id, PS_PLANE_SEL(plane_id));
-
scaler = &crtc_state->scaler_state.scalers[scaler_id];
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abe0888..b7ff592 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_wait_ack(d);
+
+ dev_priv->uncore.fw_domains_active |= fw_domains;
}
static void
@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
fw_domain_put(d);
fw_domain_posting_read(d);
}
+
+ dev_priv->uncore.fw_domains_active &= ~fw_domains;
}
static void
@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0) {
+ if (--domain->wake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
- dev_priv->uncore.fw_domains_active &= ~domain->mask;
- }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= ~domain->mask;
}
- if (fw_domains) {
+ if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
- dev_priv->uncore.fw_domains_active |= fw_domains;
- }
}
/**
@@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
fw_domain_arm_timer(domain);
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
- dev_priv->uncore.fw_domains_active |= fw_domains;
}
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c3..ee5883f 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct drm_gem_object *obj = buffer->priv;
int ret = 0;
- if (WARN_ON(!obj->filp))
- return -EINVAL;
-
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d12b897..c7af9fd 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ }
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index f80bf93..d745e8b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ unsigned long flags;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
LCDC_PALETTE_LOAD_MODE_MASK);
+
+ /* There is no real chance for a race here as the timestamp is
+ * taken before the raster DMA is started. The spinlock is taken
+ * to have a memory barrier after taking the timestamp and to
+ * avoid a context switch between taking the timestamp and
+ * enabling the raster.
+ */
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ tilcdc_crtc->last_vblank = ktime_get();
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
drm_crtc_vblank_on(crtc);
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
}
drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = 0;
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
drm_framebuffer_reference(fb);
crtc->primary->fb = fb;
+ tilcdc_crtc->event = event;
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ mutex_lock(&tilcdc_crtc->enable_lock);
- if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ if (tilcdc_crtc->enabled) {
+ unsigned long flags;
ktime_t next_vblank;
s64 tdiff;
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
tilcdc_crtc->next_fb = fb;
+ else
+ set_scanout(crtc, fb);
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
}
- if (tilcdc_crtc->next_fb != fb)
- set_scanout(crtc, fb);
-
- tilcdc_crtc->event = event;
-
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+ mutex_unlock(&tilcdc_crtc->enable_lock);
return 0;
}
@@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
fail:
tilcdc_crtc_destroy(crtc);
- return -ENOMEM;
+ return ret;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1aeb80e..8c54cb8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -175,11 +175,11 @@
Support for Cherry Cymotion keyboard.
config HID_CHICONY
- tristate "Chicony Tactical pad"
+ tristate "Chicony devices"
depends on HID
default !EXPERT
---help---
- Support for Chicony Tactical pad.
+ Support for Chicony Tactical pad and special keys on Chicony keyboards.
config HID_CORSAIR
tristate "Corsair devices"
@@ -190,6 +190,7 @@
Supported devices:
- Vengeance K90
+ - Scimitar PRO RGB
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index bc3cec1..f04ed9a 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e9e87d3..3ceb4a2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index c0303f6..9ba5d98 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -3,8 +3,10 @@
*
* Supported devices:
* - Vengeance K90 Keyboard
+ * - Scimitar PRO RGB Gaming Mouse
*
* Copyright (c) 2015 Clement Vuchener
+ * Copyright (c) 2017 Oscar Campos
*/
/*
@@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev,
return 0;
}
+/*
+ * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse
+ * is not parseable as it defines two consecutive Logical Minimum items
+ * for the Usage Page (Consumer) in rdesc bytes 75 and 77: byte 77 is
+ * 0x16 when it should obviously be 0x26, a 16-bit Logical Maximum.
+ * This prevents proper parsing of the report descriptor because the
+ * Logical Minimum ends up larger than the Logical Maximum.
+ *
+ * This driver fixes the report descriptor for:
+ * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse
+ */
+
+static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ /*
+ * The Corsair Scimitar RGB Pro report descriptor is broken and
+ * defines two different Logical Minimum items for the Consumer
+ * Application. Byte 77 should be 0x26, defining a 16-bit
+ * integer for the Logical Maximum, but it is 0x16 instead
+ * (Logical Minimum).
+ */
+ switch (hdev->product) {
+ case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB:
+ if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16
+ && rdesc[78] == 0xff && rdesc[79] == 0x0f) {
+ hid_info(hdev, "Fixing up report descriptor\n");
+ rdesc[77] = 0x26;
+ }
+ break;
+ }
+
+ }
+ return rdesc;
+}
+
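
A standalone illustration of the fixup pattern used in corsair_mouse_report_fixup() above: validate the bytes around the known-bad offset, then rewrite the single item tag. Only the offsets mirror the driver; the descriptor contents here are made up.

    /* Minimal sketch of a guarded single-byte report descriptor fixup. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void fixup_descriptor(uint8_t *rdesc, size_t rsize)
    {
        if (rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 &&
            rdesc[78] == 0xff && rdesc[79] == 0x0f) {
            printf("fixing up report descriptor\n");
            rdesc[77] = 0x26;   /* Logical Minimum -> Logical Maximum */
        }
    }

    int main(void)
    {
        uint8_t rdesc[200] = {0};

        rdesc[75] = 0x15;   /* Logical Minimum (0) */
        rdesc[77] = 0x16;   /* bogus second Logical Minimum */
        rdesc[78] = 0xff;
        rdesc[79] = 0x0f;   /* 0x0fff */

        fixup_descriptor(rdesc, sizeof(rdesc));
        printf("byte 77 = 0x%02x\n", rdesc[77]);
        return 0;
    }
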
static const struct hid_device_id corsair_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90),
.driver_data = CORSAIR_USE_K90_MACRO |
CORSAIR_USE_K90_BACKLIGHT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR,
+ USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
{}
};
@@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = {
.event = corsair_event,
.remove = corsair_remove,
.input_mapping = corsair_input_mapping,
+ .report_fixup = corsair_mouse_report_fixup,
};
module_hid_driver(corsair_driver);
MODULE_LICENSE("GPL");
+/* Original K90 driver author */
MODULE_AUTHOR("Clement Vuchener");
+/* Scimitar PRO RGB driver author */
+MODULE_AUTHOR("Oscar Campos");
MODULE_DESCRIPTION("HID driver for Corsair devices");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 86c95d3..0e2e7c57 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -278,6 +278,9 @@
#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13
#define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15
#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17
+#define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38
+#define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39
+#define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e
#define USB_VENDOR_ID_CREATIVELABS 0x041e
#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
@@ -557,6 +560,7 @@
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
+#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
#define USB_VENDOR_ID_JESS2 0x0f30
#define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index f405b07..740996f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2632,6 +2632,8 @@ static int sony_input_configured(struct hid_device *hdev,
sony_leds_remove(sc);
if (sc->quirks & SONY_BATTERY_SUPPORT)
sony_battery_remove(sc);
+ if (sc->touchpad)
+ sony_unregister_touchpad(sc);
sony_cancel_work_sync(sc);
kfree(sc->output_report_dmabuf);
sony_remove_dev_list(sc);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index d6847a6..a69a3c8 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -80,6 +80,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index be8f7e2..994bddc 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2579,7 +2579,9 @@ static void wacom_remove(struct hid_device *hdev)
/* make sure we don't trigger the LEDs */
wacom_led_groups_release(wacom);
- wacom_release_resources(wacom);
+
+ if (wacom->wacom_wac.features.type != REMOTE)
+ wacom_release_resources(wacom);
hid_set_drvdata(hdev, NULL);
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 4aa3de9..94250c2 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ }
break;
case WACOM_HID_WD_FINGERWHEEL:
wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
@@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 =
WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
static const struct wacom_features wacom_features_0x360 =
{ "Wacom Intuos Pro M", 44800, 29600, 8191, 63,
- INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+ INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
static const struct wacom_features wacom_features_0x361 =
{ "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
- INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 };
+ INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index bd0d198..321b883 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
wait_for_completion(&info->waitevent);
- if (channel->rescind) {
- ret = -ENODEV;
- goto post_msg_err;
- }
-
post_msg_err:
+ /*
+ * If the channel has been rescinded,
+ * we will be awakened by the rescind
+ * handler; set the error code to zero so we don't leak memory.
+ */
+ if (channel->rescind)
+ ret = 0;
+
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -530,15 +533,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
int ret;
/*
- * vmbus_on_event(), running in the tasklet, can race
+ * vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
* the former is accessing channel->inbound.ring_buffer, the latter
- * could be freeing the ring_buffer pages.
- *
- * To resolve the race, we can serialize them by disabling the
- * tasklet when the latter is running here.
+ * could be freeing the ring_buffer pages, so here we must stop it
+ * first.
*/
- hv_event_tasklet_disable(channel);
+ tasklet_disable(&channel->callback_event);
/*
* In case a device driver's probe() fails (e.g.,
@@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
- hv_event_tasklet_enable(channel);
-
return ret;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index f33465d..fbcb063 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
- kfree(channel);
+
+ kfree_rcu(channel, rcu);
}
static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
- list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
+ list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}
static void percpu_channel_deq(void *arg)
{
struct vmbus_channel *channel = arg;
- list_del(&channel->percpu_list);
+ list_del_rcu(&channel->percpu_list);
}
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
true);
}
-void hv_event_tasklet_disable(struct vmbus_channel *channel)
-{
- tasklet_disable(&channel->callback_event);
-}
-
-void hv_event_tasklet_enable(struct vmbus_channel *channel)
-{
- tasklet_enable(&channel->callback_event);
-
- /* In case there is any pending event */
- tasklet_schedule(&channel->callback_event);
-}
-
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
- hv_event_tasklet_disable(channel);
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
percpu_channel_deq(channel);
put_cpu();
}
- hv_event_tasklet_enable(channel);
if (channel->primary_channel == NULL) {
list_del(&channel->listentry);
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
init_vp_index(newchannel, dev_type);
- hv_event_tasklet_disable(newchannel);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
percpu_channel_enq(newchannel);
put_cpu();
}
- hv_event_tasklet_enable(newchannel);
/*
* This state is used to indicate a successful open
@@ -565,7 +549,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
list_del(&newchannel->listentry);
mutex_unlock(&vmbus_connection.channel_mutex);
- hv_event_tasklet_disable(newchannel);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
percpu_channel_deq(newchannel);
put_cpu();
}
- hv_event_tasklet_enable(newchannel);
vmbus_release_relid(newchannel->offermsg.child_relid);
@@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
+ vmbus_release_relid(offer->child_relid);
pr_err("Unable to allocate channel object\n");
return;
}
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 9aee601..a5596a6 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
static const char fcopy_devname[] = "vmbus/hv_fcopy";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
-static struct completion release_event;
/*
* This state maintains the version number registered by the daemon.
*/
@@ -331,7 +330,6 @@ static void fcopy_on_reset(void)
if (cancel_delayed_work_sync(&fcopy_timeout_work))
fcopy_respond_to_host(HV_E_FAIL);
- complete(&release_event);
}
int hv_fcopy_init(struct hv_util_service *srv)
@@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv)
recv_buffer = srv->recv_buffer;
fcopy_transaction.recv_channel = srv->channel;
- init_completion(&release_event);
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
@@ -361,5 +358,4 @@ void hv_fcopy_deinit(void)
fcopy_transaction.state = HVUTIL_DEVICE_DYING;
cancel_delayed_work_sync(&fcopy_timeout_work);
hvutil_transport_destroy(hvt);
- wait_for_completion(&release_event);
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index de26371..a1adfe2 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
-static struct completion release_event;
/*
* Register the kernel component with the user-level daemon.
* As part of this registration, pass the LIC version number.
@@ -714,7 +713,6 @@ static void kvp_on_reset(void)
if (cancel_delayed_work_sync(&kvp_timeout_work))
kvp_respond_to_host(NULL, HV_E_FAIL);
kvp_transaction.state = HVUTIL_DEVICE_INIT;
- complete(&release_event);
}
int
@@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv)
recv_buffer = srv->recv_buffer;
kvp_transaction.recv_channel = srv->channel;
- init_completion(&release_event);
/*
* When this driver loads, the user level daemon that
* processes the host requests may not yet be running.
@@ -747,5 +744,4 @@ void hv_kvp_deinit(void)
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
hvutil_transport_destroy(hvt);
- wait_for_completion(&release_event);
}
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index bcc03f0..e659d1b 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -79,7 +79,6 @@ static int dm_reg_value;
static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;
-static struct completion release_event;
static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);
@@ -361,13 +360,11 @@ static void vss_on_reset(void)
if (cancel_delayed_work_sync(&vss_timeout_work))
vss_respond_to_host(HV_E_FAIL);
vss_transaction.state = HVUTIL_DEVICE_INIT;
- complete(&release_event);
}
int
hv_vss_init(struct hv_util_service *srv)
{
- init_completion(&release_event);
if (vmbus_proto_version < VERSION_WIN8_1) {
pr_warn("Integration service 'Backup (volume snapshot)'"
" not supported on this host version.\n");
@@ -400,5 +397,4 @@ void hv_vss_deinit(void)
cancel_delayed_work_sync(&vss_timeout_work);
cancel_work_sync(&vss_handle_request_work);
hvutil_transport_destroy(hvt);
- wait_for_completion(&release_event);
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 3042eaa..186b100 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv)
if (!hyperv_cs)
return -ENODEV;
+ spin_lock_init(&host_ts.lock);
+
INIT_WORK(&wrk.work, hv_set_host_time);
/*
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index c235a95..4402a71 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file)
* connects back.
*/
hvt_reset(hvt);
- mutex_unlock(&hvt->lock);
if (mode_old == HVUTIL_TRANSPORT_DESTROY)
- hvt_transport_free(hvt);
+ complete(&hvt->release);
+
+ mutex_unlock(&hvt->lock);
return 0;
}
@@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
init_waitqueue_head(&hvt->outmsg_q);
mutex_init(&hvt->lock);
+ init_completion(&hvt->release);
spin_lock(&hvt_list_lock);
list_add(&hvt->list, &hvt_list);
@@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt)
if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
cn_del_callback(&hvt->cn_id);
- if (mode_old != HVUTIL_TRANSPORT_CHARDEV)
- hvt_transport_free(hvt);
+ if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
+ wait_for_completion(&hvt->release);
+
+ hvt_transport_free(hvt);
}
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h
index d98f522..79afb62 100644
--- a/drivers/hv/hv_utils_transport.h
+++ b/drivers/hv/hv_utils_transport.h
@@ -41,6 +41,7 @@ struct hvutil_transport {
int outmsg_len; /* its length */
wait_queue_head_t outmsg_q; /* poll/read wait queue */
struct mutex lock; /* protects struct members */
+ struct completion release; /* synchronize with fd release */
};
struct hvutil_transport *hvutil_transport_init(const char *name,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index da6b59b..8370b9dc 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (relid == 0)
continue;
+ rcu_read_lock();
+
/* Find channel based on relid */
- list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+ list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
if (channel->offermsg.child_relid != relid)
continue;
@@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
tasklet_schedule(&channel->callback_event);
}
}
+
+ rcu_read_unlock();
}
}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index cccef87..975c43d 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
else
err = atk_read_value_new(sensor, value);
+ if (err)
+ return err;
+
sensor->is_valid = true;
sensor->last_updated = jiffies;
sensor->cached_value = *value;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index efb01c2..4dfc723 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void)
{
int sioaddr[2] = { REG_2E, REG_4E };
struct it87_sio_data sio_data;
- unsigned short isa_address;
+ unsigned short isa_address[2];
bool found = false;
int i, err;
@@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void)
for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
memset(&sio_data, 0, sizeof(struct it87_sio_data));
- isa_address = 0;
- err = it87_find(sioaddr[i], &isa_address, &sio_data);
- if (err || isa_address == 0)
+ isa_address[i] = 0;
+ err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
+ if (err || isa_address[i] == 0)
continue;
+ /*
+ * Don't register second chip if its ISA address matches
+ * the first chip's ISA address.
+ */
+ if (i && isa_address[i] == isa_address[0])
+ break;
- err = it87_device_add(i, isa_address, &sio_data);
+ err = it87_device_add(i, isa_address[i], &sio_data);
if (err)
goto exit_dev_unregister;
+
found = true;
+
+ /*
+ * IT8705F may respond on both SIO addresses.
+ * Stop probing after finding one.
+ */
+ if (sio_data.type == it87)
+ break;
}
if (!found) {
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index c1b9275..281491c 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
data->pwm[channel] = val << 8;
err = i2c_smbus_write_word_swapped(client,
MAX31790_REG_PWMOUT(channel),
- val);
+ data->pwm[channel]);
break;
case hwmon_pwm_enable:
fan_config = data->fan_config[channel];
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index cdd9b3b..7563ece 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
else
intel_th_trace_enable(thdev);
- if (ret)
+ if (ret) {
pm_runtime_put(&thdev->dev);
+ module_put(thdrv->driver.owner);
+ }
return ret;
}
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0bba384..590cf90 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Denverton */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Gemini Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index ad9dec3..4282cec 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- unsigned int status, config;
+ unsigned int status, config, adc_fsm;
+ unsigned short count = 0;
+
status = tiadc_readl(adc_dev, REG_IRQSTATUS);
/*
@@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
tiadc_writel(adc_dev, REG_CTRL, config);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+
+ /* Wait for the idle state.
+ * The ADC needs to finish the current conversion
+ * before disabling the module.
+ */
+ do {
+ adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
+ } while (adc_fsm != 0x10 && count++ < 100);
+
tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
return IRQ_HANDLED;
} else if (status & IRQENB_FIFO1THRES) {
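
The new loop above bounds the wait for the ADC finite-state machine to go idle, so a wedged conversion cannot stall the interrupt handler forever. A tiny standalone sketch of that bounded poll; read_fsm() is a made-up stand-in for the real register read:

    /* Minimal sketch; the idle value and retry limit mirror the hunk. */
    #include <stdio.h>

    #define ADC_FSM_IDLE 0x10
    #define MAX_POLLS    100

    static unsigned int read_fsm(void)
    {
        static int reads;

        /* pretend the hardware stays busy for the first couple of reads */
        return ++reads < 3 ? 0x20 : ADC_FSM_IDLE;
    }

    int main(void)
    {
        unsigned int fsm, polls = 0;

        do {
            fsm = read_fsm();
        } while (fsm != ADC_FSM_IDLE && ++polls < MAX_POLLS);

        printf("fsm=0x%x after %u polls\n", fsm, polls);
        return 0;
    }
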
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3cce3a..ecf592d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
st->report_state.report_id,
st->report_state.index,
HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
-
- poll_value = hid_sensor_read_poll_value(st);
} else {
int val;
@@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
- if (state && poll_value)
+ if (state)
+ poll_value = hid_sensor_read_poll_value(st);
+ if (poll_value > 0)
msleep_interruptible(poll_value * 2);
return 0;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 78532ce..81b572d 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
if (err < 0)
goto out;
- fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) |
- (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
+ fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
+ (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
wdata = cpu_to_le16(fifo_watermark);
err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
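
The one-line change above matters because the threshold mask applies to the assembled 16-bit value: shifting the high register byte first and masking the result preserves the non-threshold bits, whereas masking the raw byte before the shift (the old code) cleared them. A minimal sketch with an illustrative mask value:

    /* Minimal sketch; the 12-bit mask is illustrative, the point is the
     * order of shift and mask. */
    #include <stdio.h>
    #include <stdint.h>

    #define FIFO_TH_MASK 0x0fffu    /* low bits hold the threshold */

    static uint16_t merge_watermark(uint8_t high_byte_reg, uint16_t watermark)
    {
        /* keep the device's non-threshold bits, take the threshold
         * bits from the requested watermark */
        return (((uint16_t)high_byte_reg << 8) & ~FIFO_TH_MASK) |
               (watermark & FIFO_TH_MASK);
    }

    int main(void)
    {
        /* 0x5a00 read back from the device: 0x5000 survives, the rest
         * belongs to the threshold field and is replaced by 0x123 */
        printf("0x%04x\n", (unsigned)merge_watermark(0x5a, 0x0123));
        return 0;
    }
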
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 6dd8cbd..e13370d 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -763,7 +763,7 @@ static int ak8974_probe(struct i2c_client *i2c,
return ret;
}
-static int __exit ak8974_remove(struct i2c_client *i2c)
+static int ak8974_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = {
.of_match_table = of_match_ptr(ak8974_of_match),
},
.probe = ak8974_probe,
- .remove = __exit_p(ak8974_remove),
+ .remove = ak8974_remove,
.id_table = ak8974_id,
};
module_i2c_driver(ak8974_driver);
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index e955101..f2ae75f 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
{
int i, n, completed = 0;
- while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) {
+ /*
+ * budget might be (-1) if the caller does not
+ * want to bound this call, thus we need unsigned
+ * minimum here.
+ */
+ while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
+ budget - completed), cq->wc)) > 0) {
for (i = 0; i < n; i++) {
struct ib_wc *wc = &cq->wc[i];
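
The comment in this hunk is the key point: a budget of -1 means "unbounded", and as a signed value it would lose the comparison against the batch size, so the clamp has to be done as an unsigned minimum. A small demonstration of the same arithmetic outside the driver:

    /* Minimal sketch; POLL_BATCH plays the role of IB_POLL_BATCH. */
    #include <stdio.h>
    #include <stdint.h>

    #define POLL_BATCH 16

    static uint32_t batch_size(int budget, int completed)
    {
        /* -1 becomes UINT32_MAX here, so the batch size wins */
        uint32_t remaining = (uint32_t)(budget - completed);

        return remaining < POLL_BATCH ? remaining : POLL_BATCH;
    }

    int main(void)
    {
        printf("%u\n", (unsigned)batch_size(-1, 0));    /* unbounded -> 16 */
        printf("%u\n", (unsigned)batch_size(10, 7));    /* bounded   ->  3 */
        return 0;
    }
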
@@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq)
irq_poll_disable(&cq->iop);
break;
case IB_POLL_WORKQUEUE:
- flush_work(&cq->work);
+ cancel_work_sync(&cq->work);
break;
default:
WARN_ON_ONCE(1);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 593d2ce..7c9e34d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
struct device *parent = device->dev.parent;
WARN_ON_ONCE(!parent);
- if (!device->dev.dma_ops)
- device->dev.dma_ops = parent->dma_ops;
- if (!device->dev.dma_mask)
- device->dev.dma_mask = parent->dma_mask;
- if (!device->dev.coherent_dma_mask)
- device->dev.coherent_dma_mask = parent->coherent_dma_mask;
+ WARN_ON_ONCE(device->dma_device);
+ if (device->dev.dma_ops) {
+ /*
+ * The caller provided custom DMA operations. Copy the
+ * DMA-related fields that are used by e.g. dma_alloc_coherent()
+ * into device->dev.
+ */
+ device->dma_device = &device->dev;
+ if (!device->dev.dma_mask)
+ device->dev.dma_mask = parent->dma_mask;
+ if (!device->dev.coherent_dma_mask)
+ device->dev.coherent_dma_mask =
+ parent->coherent_dma_mask;
+ } else {
+ /*
+ * The caller did not provide custom DMA operations. Use the
+ * DMA mapping operations of the parent device.
+ */
+ device->dma_device = parent;
+ }
mutex_lock(&device_mutex);
@@ -1015,8 +1029,7 @@ static int __init ib_core_init(void)
return -ENOMEM;
ib_comp_wq = alloc_workqueue("ib-comp-wq",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- WQ_UNBOUND_MAX_ACTIVE);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!ib_comp_wq) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0f5d43d..70c3e9e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
return NOTIFY_DONE;
iwdev = &hdl->device;
+ if (iwdev->init_state < INET_NOTIFIER)
+ return NOTIFY_DONE;
+
netdev = iwdev->ldev->netdev;
upper_dev = netdev_master_upper_dev_get(netdev);
if (netdev != event_netdev)
@@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_DONE;
iwdev = &hdl->device;
+ if (iwdev->init_state < INET_NOTIFIER)
+ return NOTIFY_DONE;
+
netdev = iwdev->ldev->netdev;
if (netdev != event_netdev)
return NOTIFY_DONE;
@@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
if (!iwhdl)
return NOTIFY_DONE;
iwdev = &iwhdl->device;
+ if (iwdev->init_state < INET_NOTIFIER)
+ return NOTIFY_DONE;
p = (__be32 *)neigh->primary_key;
i40iw_copy_ip_ntohl(local_ipaddr, p);
if (neigh->nud_state & NUD_VALID) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index bc9fb14..c52edea 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
return 0;
}
-static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
bool dpp_pool)
{
int status;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 12c4208..af9f596 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
unsigned long flags;
while (wait) {
- unsigned long shadow;
+ unsigned long shadow = 0;
int cstart, previ = -1;
/*
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 3cd96c1..9fbe22d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -69,6 +69,9 @@
*/
#define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820
+#define PVRDMA_NUM_RING_PAGES 4
+#define PVRDMA_QP_NUM_HEADER_PAGES 1
+
struct pvrdma_dev;
struct pvrdma_page_dir {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index e69d6f3..09078cc 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -132,7 +132,7 @@ enum pvrdma_pci_resource {
enum pvrdma_device_ctl {
PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */
- PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */
+ PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */
PVRDMA_DEVICE_CTL_RESET, /* Reset device. */
};
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 100bea5..34ebc76 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -56,7 +56,7 @@
#include "pvrdma.h"
#define DRV_NAME "vmw_pvrdma"
-#define DRV_VERSION "1.0.0.0-k"
+#define DRV_VERSION "1.0.1.0-k"
static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
@@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
break;
case NETDEV_UP:
- pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ pvrdma_write_reg(dev, PVRDMA_REG_CTL,
+ PVRDMA_DEVICE_CTL_UNQUIESCE);
+
+ mb();
+
+ if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
+ dev_err(&dev->pdev->dev,
+ "failed to activate device during link up\n");
+ else
+ pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
break;
default:
dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
@@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev->dsr->resp_slot_dma = (u64)slot_dma;
/* Async event ring */
- dev->dsr->async_ring_pages.num_pages = 4;
+ dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
dev->dsr->async_ring_pages.num_pages, true);
if (ret)
@@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
/* CQ notification ring */
- dev->dsr->cq_ring_pages.num_pages = 4;
+ dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
dev->dsr->cq_ring_pages.num_pages, true);
if (ret)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index dbbfd35..30062aad 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
sizeof(struct pvrdma_sge) *
qp->sq.max_sg);
/* Note: one extra page for the header. */
- qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
- PAGE_SIZE - 1) / PAGE_SIZE;
+ qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
+ (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
return 0;
}
@@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages = qp->npages_send + qp->npages_recv;
/* Skip header page. */
- qp->sq.offset = PAGE_SIZE;
+ qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;
/* Recv queue pages are after send pages. */
qp->rq.offset = qp->npages_send * PAGE_SIZE;
@@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
cmd->total_chunks = qp->npages;
- cmd->send_chunks = qp->npages_send - 1;
+ cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
cmd->pdir_dma = qp->pdir.dir_dma;
dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
@@ -554,13 +555,13 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return ret;
}
-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
return pvrdma_page_dir_get_ptr(&qp->pdir,
qp->sq.offset + n * qp->sq.wqe_size);
}
-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
return pvrdma_page_dir_get_ptr(&qp->pdir,
qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
struct pvrdma_sq_wqe_hdr *wqe_hdr;
struct pvrdma_sge *sge;
- int i, index;
- int nreq;
- int ret;
+ int i, ret;
/*
* In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- unsigned int tail;
+ while (wr) {
+ unsigned int tail = 0;
if (unlikely(!pvrdma_idx_ring_has_space(
qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
}
- wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+ wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
memset(wqe_hdr, 0, sizeof(*wqe_hdr));
wqe_hdr->wr_id = wr->wr_id;
wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
/* Make sure wqe is written before index update */
smp_wmb();
- index++;
- if (unlikely(index >= qp->sq.wqe_cnt))
- index = 0;
/* Update shared sq ring */
pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
qp->sq.wqe_cnt);
+
+ wr = wr->next;
}
ret = 0;
@@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct pvrdma_qp *qp = to_vqp(ibqp);
struct pvrdma_rq_wqe_hdr *wqe_hdr;
struct pvrdma_sge *sge;
- int index, nreq;
int ret = 0;
int i;
@@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags);
- index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- unsigned int tail;
+ while (wr) {
+ unsigned int tail = 0;
if (unlikely(wr->num_sge > qp->rq.max_sg ||
wr->num_sge < 0)) {
@@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
goto out;
}
- wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+ wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
wqe_hdr->wr_id = wr->wr_id;
wqe_hdr->num_sge = wr->num_sge;
wqe_hdr->total_len = 0;
@@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
/* Make sure wqe is written before index update */
smp_wmb();
- index++;
- if (unlikely(index >= qp->rq.wqe_cnt))
- index = 0;
/* Update shared rq ring */
pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
qp->rq.wqe_cnt);
+
+ wr = wr->next;
}
spin_unlock_irqrestore(&qp->rq.lock, flags);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index e202b81..6b712ee 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
spin_lock_irq(&rdi->mmap_offset_lock);
if (rdi->mmap_offset == 0)
- rdi->mmap_offset = PAGE_SIZE;
+ rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
ip->offset = rdi->mmap_offset;
- rdi->mmap_offset += size;
+ rdi->mmap_offset += ALIGN(size, SHMLBA);
spin_unlock_irq(&rdi->mmap_offset_lock);
INIT_LIST_HEAD(&ip->pending_mmaps);
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 7d1ac27..6332ded 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -22,4 +22,4 @@
To configure and work with soft-RoCE driver please use the
following wiki page under "configure Soft-RoCE (RXE)" section:
- https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+ https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index c572a4c..bd812e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
spin_lock_bh(&rxe->mmap_offset_lock);
if (rxe->mmap_offset == 0)
- rxe->mmap_offset = PAGE_SIZE;
+ rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
ip->info.offset = rxe->mmap_offset;
- rxe->mmap_offset += size;
+ rxe->mmap_offset += ALIGN(size, SHMLBA);
spin_unlock_bh(&rxe->mmap_offset_lock);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index dbfde0d..9f95f50 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -729,11 +729,11 @@ int rxe_requester(void *arg)
ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
if (ret) {
qp->need_req_skb = 1;
- kfree_skb(skb);
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
if (ret == -EAGAIN) {
+ kfree_skb(skb);
rxe_run_task(&qp->req.task, 1);
goto exit;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index d404a8a..c9dd385 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
WARN_ON_ONCE(1);
}
- /* We successfully processed this new request. */
- qp->resp.msn++;
-
/* next expected psn, read handles this separately */
qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
qp->resp.opcode = pkt->opcode;
qp->resp.status = IB_WC_SUCCESS;
- if (pkt->mask & RXE_COMP_MASK)
+ if (pkt->mask & RXE_COMP_MASK) {
+ /* We successfully processed this new request. */
+ qp->resp.msn++;
return RESPST_COMPLETE;
- else if (qp_type(qp) == IB_QPT_RC)
+ } else if (qp_type(qp) == IB_QPT_RC)
return RESPST_ACKNOWLEDGE;
else
return RESPST_CLEANUP;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d0b22a..c1ae4ae 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -430,6 +430,7 @@ struct iser_fr_desc {
struct list_head list;
struct iser_reg_resources rsc;
struct iser_pi_context *pi_ctx;
+ struct list_head all_list;
};
/**
@@ -443,6 +444,7 @@ struct iser_fr_pool {
struct list_head list;
spinlock_t lock;
int size;
+ struct list_head all_list;
};
/**
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 30b622f..c538a38 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
int i, ret;
INIT_LIST_HEAD(&fr_pool->list);
+ INIT_LIST_HEAD(&fr_pool->all_list);
spin_lock_init(&fr_pool->lock);
fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
@@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
}
list_add_tail(&desc->list, &fr_pool->list);
+ list_add_tail(&desc->all_list, &fr_pool->all_list);
fr_pool->size++;
}
@@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
struct iser_fr_desc *desc, *tmp;
int i = 0;
- if (list_empty(&fr_pool->list))
+ if (list_empty(&fr_pool->all_list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
- list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
- list_del(&desc->list);
+ list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
+ list_del(&desc->all_list);
iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
iser_free_pi_ctx(desc->pi_ctx);
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27..db64adf 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
epirq = &interface->endpoint[0].desc;
epout = &interface->endpoint[1].desc;
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9cc6d05..23c191a 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf,
int error = -ENOMEM;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36..f4e8fbe 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
pcu->ep_ctrl = &alt->endpoint[0].desc;
pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c..6e7ff95 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
int ret, pipe, i;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 72b28eb..f210e19 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
/* handle buttons */
if (pkt_id == SS4_PACKET_ID_STICK) {
f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
- if (!(priv->flags & ALPS_BUTTONPAD)) {
- f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
- f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
- }
+ f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+ f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
} else {
f->left = !!(SS4_BTN_V2(p) & 0x01);
if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4],
int num_y_electrode;
int x_pitch, y_pitch, x_phys, y_phys;
- num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
- num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ num_x_electrode =
+ SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
+ num_y_electrode =
+ SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
- priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
- priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+ priv->x_max =
+ (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
+ priv->y_max =
+ (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
- x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
- y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+ x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+ y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+
+ } else {
+ num_x_electrode =
+ SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
+ num_y_electrode =
+ SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+
+ priv->x_max =
+ (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+ priv->y_max =
+ (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+
+ x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
+ y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+ }
x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
{
unsigned char is_btnless;
- is_btnless = (otp[1][1] >> 3) & 0x01;
+ if (IS_SS4PLUS_DEV(priv->dev_id))
+ is_btnless = (otp[1][0] >> 1) & 0x01;
+ else
+ is_btnless = (otp[1][1] >> 3) & 0x01;
if (is_btnless)
priv->flags |= ALPS_BUTTONPAD;
@@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
return 0;
}
+static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
+ struct alps_data *priv)
+{
+ bool is_dual = false;
+
+ if (IS_SS4PLUS_DEV(priv->dev_id))
+ is_dual = (otp[0][0] >> 4) & 0x01;
+
+ if (is_dual)
+ priv->flags |= ALPS_DUALPOINT |
+ ALPS_DUALPOINT_WITH_PRESSURE;
+
+ return 0;
+}
+
static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
struct alps_data *priv)
{
@@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
alps_update_btn_info_ss4_v2(otp, priv);
+ alps_update_dual_info_ss4_v2(otp, priv);
+
return 0;
}
@@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
if (alps_set_defaults_ss4_v2(psmouse, priv))
return -EIO;
- if (priv->fw_ver[1] == 0x1)
- priv->flags |= ALPS_DUALPOINT |
- ALPS_DUALPOINT_WITH_PRESSURE;
-
break;
}
@@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
ec[2] >= 0x90 && ec[2] <= 0x9d) {
protocol = &alps_v3_protocol_data;
} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
- e7[2] == 0x14 && ec[1] == 0x02) {
- protocol = &alps_v8_protocol_data;
- } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
- e7[2] == 0x28 && ec[1] == 0x01) {
+ (e7[2] == 0x14 || e7[2] == 0x28)) {
protocol = &alps_v8_protocol_data;
} else {
psmouse_dbg(psmouse,
@@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
}
if (priv) {
- /* Save the Firmware version */
+ /* Save Device ID and Firmware version */
+ memcpy(priv->dev_id, e7, 3);
memcpy(priv->fw_ver, ec, 3);
error = alps_set_protocol(psmouse, priv, protocol);
if (error)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 6d279aa..4334f28 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,6 +54,16 @@ enum SS4_PACKET_ID {
#define SS4_MASK_NORMAL_BUTTONS 0x07
+#define SS4PLUS_COUNT_PER_ELECTRODE 128
+#define SS4PLUS_NUMSENSOR_XOFFSET 16
+#define SS4PLUS_NUMSENSOR_YOFFSET 5
+#define SS4PLUS_MIN_PITCH_MM 37
+
+#define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \
+ ((_b[1]) == 0x03) && \
+ ((_b[2]) == 0x28) \
+ )
+
#define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \
((_b[1]) == 0x10) && \
((_b[2]) == 0x00) && \
@@ -283,6 +293,7 @@ struct alps_data {
int addr_command;
u16 proto_version;
u8 byte0, mask0;
+ u8 dev_id[3];
u8 fw_ver[3];
int flags;
int x_max;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 352050e..d5ab9dd 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data)
static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
{
- if (data->ic_type != 0x0E)
- return false;
-
- switch (data->product_id) {
- case 0x05 ... 0x07:
- case 0x09:
- case 0x13:
+ if (data->ic_type == 0x0E) {
+ switch (data->product_id) {
+ case 0x05 ... 0x07:
+ case 0x09:
+ case 0x13:
+ return true;
+ }
+ } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+ /* ASUS EeeBook X205TA */
return true;
- default:
- return false;
}
+
+ return false;
}
static int __elan_initialize(struct elan_tp_data *data)
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 1986786..34dfee5 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn)
rmi_get_platform_data(fn->rmi_dev);
int error;
+ /* can happen if f30_data.disable is set */
+ if (!f30)
+ return 0;
+
if (pdata->f30_data.trackstick_buttons) {
/* Try [re-]establish link to F03. */
f30->f03 = rmi_find_function(fn->rmi_dev, 0x03);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 05afd16..312bd6c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Dell Embedded Box PC 3000 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ },
+ },
+ {
/* OQO Model 01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
},
},
+ {
+ /* TUXEDO BU1406 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd85205..df4bea9 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id
int error;
int i;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
input_dev = input_allocate_device();
if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index e850d7e..4d9d649 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
struct input_dev *input_dev;
int error = -ENOMEM;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index aefb6e1..4c0eeca 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface,
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 5)
+ return -ENODEV;
+
/* Use endpoint #4 (0x86). */
endpoint = &iface_desc->endpoint[4].desc;
if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 98940d1..63cacf5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1234,7 +1234,7 @@ static void __domain_flush_pages(struct protection_domain *domain,
build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
- for (i = 0; i < amd_iommus_present; ++i) {
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i])
continue;
@@ -1278,7 +1278,7 @@ static void domain_flush_complete(struct protection_domain *domain)
{
int i;
- for (i = 0; i < amd_iommus_present; ++i) {
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (domain && !domain->dev_iommu[i])
continue;
@@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
region = iommu_alloc_resv_region(MSI_RANGE_START,
MSI_RANGE_END - MSI_RANGE_START + 1,
- 0, IOMMU_RESV_RESERVED);
+ 0, IOMMU_RESV_MSI);
if (!region)
return;
list_add_tail(&region->list, head);
@@ -3363,7 +3363,7 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
* IOMMU TLB needs to be flushed before Device TLB to
* prevent device TLB refill from IOMMU TLB
*/
- for (i = 0; i < amd_iommus_present; ++i) {
+ for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (domain->dev_iommu[i] == 0)
continue;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6130278..5a11328 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -167,7 +167,9 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
/* Array to assign indices to IOMMUs*/
struct amd_iommu *amd_iommus[MAX_IOMMUS];
-int amd_iommus_present;
+
+/* Number of IOMMUs present in the system */
+static int amd_iommus_present;
/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
@@ -254,10 +256,6 @@ static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
-static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
- u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write);
-
static inline void update_last_devid(u16 devid)
{
if (devid > amd_iommu_last_bdf)
@@ -272,6 +270,11 @@ static inline unsigned long tbl_size(int entry_size)
return 1UL << shift;
}
+int amd_iommu_get_num_iommus(void)
+{
+ return amd_iommus_present;
+}
+
/* Access to l1 and l2 indexed register spaces */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1336,7 +1339,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
- iommu->index = amd_iommus_present++;
+ iommu->index = amd_iommus_present++;
if (unlikely(iommu->index >= MAX_IOMMUS)) {
WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
@@ -1477,6 +1480,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write);
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
@@ -1488,8 +1493,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* Check if the performance counters can be written to */
- if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
- (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
+ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
+ (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
(val != val2)) {
pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
@@ -2711,6 +2716,18 @@ bool amd_iommu_v2_supported(void)
}
EXPORT_SYMBOL(amd_iommu_v2_supported);
+struct amd_iommu *get_amd_iommu(unsigned int idx)
+{
+ unsigned int i = 0;
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ if (i++ == idx)
+ return iommu;
+ return NULL;
+}
+EXPORT_SYMBOL(get_amd_iommu);
+
/****************************************************************************
*
* IOMMU EFR Performance Counter support functionality. This code allows
@@ -2718,17 +2735,14 @@ EXPORT_SYMBOL(amd_iommu_v2_supported);
*
****************************************************************************/
-u8 amd_iommu_pc_get_max_banks(u16 devid)
+u8 amd_iommu_pc_get_max_banks(unsigned int idx)
{
- struct amd_iommu *iommu;
- u8 ret = 0;
+ struct amd_iommu *iommu = get_amd_iommu(idx);
- /* locate the iommu governing the devid */
- iommu = amd_iommu_rlookup_table[devid];
if (iommu)
- ret = iommu->max_banks;
+ return iommu->max_banks;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
@@ -2738,62 +2752,69 @@ bool amd_iommu_pc_supported(void)
}
EXPORT_SYMBOL(amd_iommu_pc_supported);
-u8 amd_iommu_pc_get_max_counters(u16 devid)
+u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
- struct amd_iommu *iommu;
- u8 ret = 0;
+ struct amd_iommu *iommu = get_amd_iommu(idx);
- /* locate the iommu governing the devid */
- iommu = amd_iommu_rlookup_table[devid];
if (iommu)
- ret = iommu->max_counters;
+ return iommu->max_counters;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
- u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write)
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+ u8 fxn, u64 *value, bool is_write)
{
u32 offset;
u32 max_offset_lim;
- /* Check for valid iommu and pc register indexing */
- if (WARN_ON((fxn > 0x28) || (fxn & 7)))
+ /* Make sure the IOMMU PC resource is available */
+ if (!amd_iommu_pc_present)
return -ENODEV;
- offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+ /* Check for valid iommu and pc register indexing */
+ if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
+ return -ENODEV;
+
+ offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
/* Limit the offset to the hw defined mmio region aperture */
- max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) |
+ max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
(iommu->max_counters << 8) | 0x28);
if ((offset < MMIO_CNTR_REG_OFFSET) ||
(offset > max_offset_lim))
return -EINVAL;
if (is_write) {
- writel((u32)*value, iommu->mmio_base + offset);
- writel((*value >> 32), iommu->mmio_base + offset + 4);
+ u64 val = *value & GENMASK_ULL(47, 0);
+
+ writel((u32)val, iommu->mmio_base + offset);
+ writel((val >> 32), iommu->mmio_base + offset + 4);
} else {
*value = readl(iommu->mmio_base + offset + 4);
*value <<= 32;
- *value = readl(iommu->mmio_base + offset);
+ *value |= readl(iommu->mmio_base + offset);
+ *value &= GENMASK_ULL(47, 0);
}
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write)
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
{
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+ if (!iommu)
+ return -EINVAL;
- /* Make sure the IOMMU PC resource is available */
- if (!amd_iommu_pc_present || iommu == NULL)
- return -ENODEV;
-
- return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
- value, is_write);
+ return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
}
+EXPORT_SYMBOL(amd_iommu_pc_get_reg);
+
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+ if (!iommu)
+ return -EINVAL;
+
+ return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
+}
+EXPORT_SYMBOL(amd_iommu_pc_set_reg);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 7eb60c1..466260f 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -21,6 +21,7 @@
#include "amd_iommu_types.h"
+extern int amd_iommu_get_num_iommus(void);
extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
@@ -56,13 +57,6 @@ extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
-/* IOMMU Performance Counter functions */
-extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
-extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
- u64 *value, bool is_write);
-
#ifdef CONFIG_IRQ_REMAP
extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
#else
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 003f3ce..4de8f41 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -611,9 +611,6 @@ extern struct list_head amd_iommu_list;
*/
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
-/* Number of IOMMUs present in the system */
-extern int amd_iommus_present;
-
/*
* Declarations for the global list of all protection domains
*/
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5806a6a..591bb96 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
- prot, IOMMU_RESV_MSI);
+ prot, IOMMU_RESV_SW_MSI);
if (!region)
return;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index abf6496..b493c99 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev,
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
- prot, IOMMU_RESV_MSI);
+ prot, IOMMU_RESV_SW_MSI);
if (!region)
return;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index a7e0821..c01bfcd 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
spin_lock_irqsave(&data->lock, flags);
if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
clk_enable(data->clk_master);
- __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ if (sysmmu_block(data)) {
+ if (data->version >= MAKE_MMU_VER(5, 0))
+ __sysmmu_tlb_invalidate(data);
+ else
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ sysmmu_unblock(data);
+ }
clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 238ad34..d412a31 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
* which we used for the IOMMU lookup. Strictly speaking
* we could do this for all PCI devices; we only need to
* get the BDF# from the scope table for ACPI matches. */
- if (pdev->is_virtfn)
+ if (pdev && pdev->is_virtfn)
goto got_pdev;
*bus = drhd->devices[i].bus;
@@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device,
reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
- 0, IOMMU_RESV_RESERVED);
+ 0, IOMMU_RESV_MSI);
if (!reg)
return;
list_add_tail(&reg->list, head);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 1c049e2..8d6ca28 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
pte |= ARM_V7S_ATTR_NS_TABLE;
__arm_v7s_set_pte(ptep, pte, 1, cfg);
- } else {
+ } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
cptep = iopte_deref(pte, lvl);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
}
/* Rinse, repeat */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index feacc54..f9bc6eb 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
__arm_lpae_set_pte(ptep, pte, cfg);
- } else {
+ } else if (!iopte_leaf(pte, lvl)) {
cptep = iopte_deref(pte, data);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
}
/* Rinse, repeat */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8ea14f4..3b67144 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = {
[IOMMU_RESV_DIRECT] = "direct",
[IOMMU_RESV_RESERVED] = "reserved",
[IOMMU_RESV_MSI] = "msi",
+ [IOMMU_RESV_SW_MSI] = "msi",
};
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
}
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
- size_t length,
- int prot, int type)
+ size_t length, int prot,
+ enum iommu_resv_type type)
{
struct iommu_resv_region *region;
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117..321ecac 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (ret == -EAGAIN) {
- memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
s = read_resync_info(mddev, bm_lockres);
if (s) {
pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
dlm_release_lockspace(cinfo->lockspace, 2);
+ kfree(cinfo);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 548d1b8..f6ae1d6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);
-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct mddev *mddev = cb->data;
- md_wakeup_thread(mddev->thread);
- kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
do {
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
- mddev->max_disks = MD_SB_DISKS;
-
if (mddev->persistent) {
- mddev->flags = 0;
- mddev->sb_flags = 0;
+ mddev->max_disks = MD_SB_DISKS;
+ mddev->flags = 0;
+ mddev->sb_flags = 0;
}
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
- if (!rv)
- revalidate_disk(mddev->gendisk);
+ if (!rv) {
+ if (mddev->queue) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
+ }
return rv;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b8859cb..dde8ecb 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
-static inline int mddev_check_plugged(struct mddev *mddev)
-{
- return !!blk_check_plugged(md_unplug, mddev,
- sizeof(struct blk_plug_cb));
-}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fbc2d78..a34f587 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
static void freeze_array(struct r1conf *conf, int extra)
{
/* Stop sync I/O and normal I/O and wait for everything to
- * go quite.
+ * go quiet.
* This is called in two situations:
* 1) management command handlers (reshape, remove disk, quiesce).
* 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
- if (bio_data_dir(split) == READ)
+ if (bio_data_dir(split) == READ) {
raid1_read_request(mddev, split);
- else
+
+ /*
+ * If a bio is split, the first part of the bio will
+ * pass barrier but the bio is queued in
+ * current->bio_list (see generic_make_request). If
+ * there is a raise_barrier() called here, the second
+ * part of bio can't pass barrier. But since the first
+ * part bio isn't dispatched to underlying disks yet,
+ * the barrier is never released, hence raise_barrier
+ * will always wait. We have a deadlock.
+ * Note, this only happens in read path. For write
+ * path, the first part of bio is dispatched in a
+ * schedule() call (because of blk plug) or offloaded
+ * to raid1d.
+ * Quitting from the function immediately can change
+ * the bio order queued in bio_list and avoid the deadlock.
+ */
+ if (split != bio) {
+ generic_make_request(bio);
+ break;
+ }
+ } else
raid1_write_request(mddev, split);
} while (split != bio);
}
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0536658..e89a8d7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1478,11 +1478,24 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_bdev = (void*)rdev;
atomic_inc(&r10_bio->remaining);
+
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
}
@@ -1572,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
+ /*
+ * If a bio is split, the first part of the bio will pass
+ * barrier but the bio is queued in current->bio_list (see
+ * generic_make_request). If there is a raise_barrier() called
+ * here, the second part of bio can't pass barrier. But since
+ * the first part bio isn't dispatched to underlying disks
+ * yet, the barrier is never released, hence raise_barrier will
+ * always wait. We have a deadlock.
+ * Note, this only happens in read path. For write path, the
+ * first part of bio is dispatched in a schedule() call
+ * (because of blk plug) or offloaded to raid10d.
+ * Quitting from the function immediately can change the bio
+ * order queued in bio_list and avoid the deadlock.
+ */
__make_request(mddev, split);
+ if (split != bio && bio_data_dir(bio) == READ) {
+ generic_make_request(bio);
+ break;
+ }
} while (split != bio);
/* In case raid10d snuck in to freeze_array */
@@ -3944,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, size);
- if (mddev->queue) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
- }
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > oldsize) {
mddev->recovery_cp = oldsize;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4fb09b3..ed5cd70 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
(test_bit(R5_Wantdrain, &dev->flags) ||
test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written)) {
+ (dev->written ||
+ test_bit(R5_InJournal, &dev->flags)))) {
if (test_bit(R5_InJournal, &dev->flags))
srcs[slot] = sh->dev[i].orig_page;
else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 67fd8ff..669a4c8 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
-static const struct platform_driver vdoa_driver = {
+static struct platform_driver vdoa_driver = {
.probe = vdoa_probe,
.remove = vdoa_remove,
.driver = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index cbb0376..0f0c389 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 8236081..7918b92 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -632,8 +632,8 @@ static int bdisp_open(struct file *file)
error_ctrls:
bdisp_ctrls_delete(ctx);
-error_fh:
v4l2_fh_del(&ctx->fh);
+error_fh:
v4l2_fh_exit(&ctx->fh);
bdisp_hw_free_nodes(ctx);
mem_ctx:
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index ab98660..04033ef 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
struct hexline *hx;
- u8 reset;
- int ret,pos=0;
+ u8 *buf;
+ int ret, pos = 0;
+ u16 cpu_cs_register = cypress[type].cpu_cs_register;
- hx = kmalloc(sizeof(*hx), GFP_KERNEL);
- if (!hx)
+ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
+ hx = (struct hexline *)buf;
/* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ buf[0] = 1;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
@@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
- kfree(hx);
+ kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
- reset = 0;
- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+ buf[0] = 0;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
- kfree(hx);
+ kfree(buf);
return ret;
}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 5457c36..bf0fe01 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
if (!of_property_read_u32(child, "dma-channel", &val))
gpmc_onenand_data->dma_channel = val;
- gpmc_onenand_init(gpmc_onenand_data);
-
- return 0;
+ return gpmc_onenand_init(gpmc_onenand_data);
}
#else
static int gpmc_probe_onenand_child(struct platform_device *pdev,
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 91f6459..b27ea98 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
/* If we're permanently dead, give up. */
if (state == pci_channel_io_perm_failure) {
- /* Tell the AFU drivers; but we don't care what they
- * say, we're going away.
- */
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
- /* Only participate in EEH if we are on a virtual PHB */
- if (afu->phb == NULL)
- return PCI_ERS_RESULT_NONE;
- cxl_vphb_error_detected(afu, state);
+ /*
+ * Tell the AFU drivers; but we don't care what they
+ * say, we're going away.
+ */
+ if (afu->phb != NULL)
+ cxl_vphb_error_detected(afu, state);
}
return PCI_ERS_RESULT_DISCONNECT;
}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 3600c99..29f2dae 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -112,11 +112,9 @@ struct mkhi_msg {
static int mei_osver(struct mei_cl_device *cldev)
{
- int ret;
const size_t size = sizeof(struct mkhi_msg_hdr) +
sizeof(struct mkhi_fwcaps) +
sizeof(struct mei_os_ver);
- size_t length = 8;
char buf[size];
struct mkhi_msg *req;
struct mkhi_fwcaps *fwcaps;
@@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev)
os_ver = (struct mei_os_ver *)fwcaps->data;
os_ver->os_type = OSTYPE_LINUX;
- ret = __mei_cl_send(cldev->cl, buf, size, mode);
- if (ret < 0)
- return ret;
-
- ret = __mei_cl_recv(cldev->cl, buf, length, 0);
- if (ret < 0)
- return ret;
-
- return 0;
+ return __mei_cl_send(cldev->cl, buf, size, mode);
}
static void mei_mkhi_fix(struct mei_cl_device *cldev)
@@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
return;
ret = mei_osver(cldev);
- if (ret)
+ if (ret < 0)
dev_err(&cldev->dev, "OS version command failed %d\n", ret);
mei_cldev_disable(cldev);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index cfb1cdf..13c55b8 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev)
mei_clear_interrupts(dev);
- mei_synchronize_irq(dev);
-
/* we're already in reset, cancel the init timer
* if the reset was called due the hbm protocol error
* we need to call it before hw start
@@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work)
container_of(work, struct mei_device, reset_work);
int ret;
+ mei_clear_interrupts(dev);
+ mei_synchronize_irq(dev);
+
mutex_lock(&dev->device_lock);
ret = mei_reset(dev);
@@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev)
mei_cancel_work(dev);
+ mei_clear_interrupts(dev);
+ mei_synchronize_irq(dev);
+
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_DOWN;
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 9d65954..dad5abe 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
*/
error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
PCI_IRQ_MSIX);
- if (error) {
+ if (error < 0) {
error = pci_alloc_irq_vectors(pdev, 1, 1,
PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
- if (error)
+ if (error < 0)
goto err_remove_bitmap;
} else {
vmci_dev->exclusive_vectors = true;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 1621fa0..ff3da96 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
bool old_req_pending)
{
- struct mmc_queue_req *mq_rq;
bool req_pending;
- mq_rq = container_of(brq, struct mmc_queue_req, brq);
-
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
@@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
case MMC_BLK_CMD_ERR:
req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
if (mmc_blk_reset(md, card->host, type)) {
- mmc_blk_rw_cmd_abort(card, old_req);
+ if (req_pending)
+ mmc_blk_rw_cmd_abort(card, old_req);
mmc_blk_rw_try_restart(mq, new_req);
return;
}
@@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_blk_issue_flush(mq, req);
} else {
mmc_blk_issue_rw_rq(mq, req);
+ card->host->context_info.is_waiting_last_req = false;
}
out:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7fd7228..b502601 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_hs400(card);
if (err)
goto free_card;
- } else {
+ } else if (!mmc_card_hs400es(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
if (err > 0 && mmc_card_hs(card)) {
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8e32580..b235d8d 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
}
sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | (div % 0xff));
+ (mode << 8) | div);
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
@@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = host->src_clk_freq / (4 * 255);
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b..1cfd7f9 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
#define VENDOR_ENHANCED_STROBE BIT(0)
-#define CLK_CTRL_TIMEOUT_SHIFT 16
-#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
-#define CLK_CTRL_TIMEOUT_MIN_EXP 13
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host,
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
{
- u32 div;
unsigned long freq;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
- div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+ /* SDHCI timeout clock is in kHz */
+ freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
- freq = clk_get_rate(pltfm_host->clk);
- freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+ /* or in MHz */
+ if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+ freq = DIV_ROUND_UP(freq, 1000);
return freq;
}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2f9ad21..7fd9642 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -85,11 +85,30 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
+/*
+ * In this specific implementation of the SDHCI controller, the power register
+ * needs to have a valid voltage set even when the power supply is managed by
+ * an external regulator.
+ */
+static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
.set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_at91_set_power,
};
static const struct sdhci_pltfm_data soc_data_sama5d2 = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 982b3e3..86560d5 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
if (mode == MMC_POWER_OFF)
return;
+ spin_unlock_irq(&host->lock);
+
/*
* Bus power might not enable after D3 -> D0 transition due to the
* present state not yet having propagated. Retry for up to 2ms.
@@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
reg |= SDHCI_POWER_ON;
sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
}
+
+ spin_lock_irq(&host->lock);
}
static const struct sdhci_ops sdhci_intel_byt_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6fdd7a7..9c1a099 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
return;
}
timeout--;
- mdelay(1);
+ spin_unlock_irq(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock_irq(&host->lock);
}
clk |= SDHCI_CLOCK_CARD_EN;
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f..1d84335 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct ushc_data *ushc;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 8a280e7..127adbe 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -984,29 +984,29 @@
#define XP_ECC_CNT1_DESC_DED_WIDTH 8
#define XP_ECC_CNT1_DESC_SEC_INDEX 0
#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
-#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_INDEX 5
#define XP_ECC_IER_DESC_DED_WIDTH 1
-#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_INDEX 4
#define XP_ECC_IER_DESC_SEC_WIDTH 1
-#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_INDEX 3
#define XP_ECC_IER_RX_DED_WIDTH 1
-#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_INDEX 2
#define XP_ECC_IER_RX_SEC_WIDTH 1
-#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_INDEX 1
#define XP_ECC_IER_TX_DED_WIDTH 1
-#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_INDEX 0
#define XP_ECC_IER_TX_SEC_WIDTH 1
-#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_INDEX 5
#define XP_ECC_ISR_DESC_DED_WIDTH 1
-#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 4
#define XP_ECC_ISR_DESC_SEC_WIDTH 1
-#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_INDEX 3
#define XP_ECC_ISR_RX_DED_WIDTH 1
-#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_INDEX 2
#define XP_ECC_ISR_RX_SEC_WIDTH 1
-#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_INDEX 1
#define XP_ECC_ISR_TX_DED_WIDTH 1
-#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_INDEX 0
#define XP_ECC_ISR_TX_SEC_WIDTH 1
#define XP_I2C_MUTEX_BUSY_INDEX 31
#define XP_I2C_MUTEX_BUSY_WIDTH 1
@@ -1148,8 +1148,8 @@
#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -1158,6 +1158,8 @@
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 937f37a..24a687c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Get the header length */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 1);
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
if (rdata->rx.hdr_len)
pdata->ext_stats.rx_split_header_packets++;
+ } else {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 0);
}
/* Get the RSS hash */
@@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
}
}
- /* Get the packet length */
- rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
- if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
- /* Not all the data has been transferred for this packet */
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 1);
+ /* Not all the data has been transferred for this packet */
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
return 0;
- }
/* This is the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 0);
+ LAST, 1);
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
/* Set checksum done indicator as appropriate */
if (netdev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ffea985..a713abd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
{
struct sk_buff *skb;
u8 *packet;
- unsigned int copy_len;
skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
if (!skb)
return NULL;
- /* Start with the header buffer which may contain just the header
+ /* Pull in the header buffer which may contain just the header
* or the header plus data
*/
dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
- copy_len = min(rdata->rx.hdr.dma_len, copy_len);
- skb_copy_to_linear_data(skb, packet, copy_len);
- skb_put(skb, copy_len);
-
- len -= copy_len;
- if (len) {
- /* Add the remaining data as a frag */
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- len, rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
+ skb_copy_to_linear_data(skb, packet, len);
+ skb_put(skb, len);
return skb;
}
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet)
+{
+ /* Always zero if not the first descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+ return 0;
+
+ /* First descriptor with split header, return header length */
+ if (rdata->rx.hdr_len)
+ return rdata->rx.hdr_len;
+
+ /* First descriptor but not the last descriptor and no split header,
+ * so the full buffer was used
+ */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.hdr.dma_len;
+
+ /* First descriptor and last descriptor and no split header, so
+ * calculate how much of the buffer was used
+ */
+ return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet,
+ unsigned int len)
+{
+ /* Always the full buffer if not the last descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.buf.dma_len;
+
+ /* Last descriptor so calculate how much of the buffer was used
+ * for the last bit of data
+ */
+ return rdata->rx.len - len;
+}
+
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
- unsigned int incomplete, error, context_next, context;
- unsigned int len, rdesc_len, max_len;
+ unsigned int last, error, context_next, context;
+ unsigned int len, buf1_len, buf2_len, max_len;
unsigned int received = 0;
int packet_count = 0;
@@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
- incomplete = 0;
+ last = 0;
context_next = 0;
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -2137,9 +2155,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
received++;
ring->cur++;
- incomplete = XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES,
- INCOMPLETE);
+ last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ LAST);
context_next = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT);
@@ -2148,7 +2165,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
CONTEXT);
/* Earlier error, just drain the remaining data */
- if ((incomplete || context_next) && error)
+ if ((!last || context_next) && error)
goto read_again;
if (error || packet->errors) {
@@ -2160,16 +2177,22 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
}
if (!context) {
- /* Length is cumulative, get this descriptor's length */
- rdesc_len = rdata->rx.len - len;
- len += rdesc_len;
+ /* Get the data length in the descriptor buffers */
+ buf1_len = xgbe_rx_buf1_len(rdata, packet);
+ len += buf1_len;
+ buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+ len += buf2_len;
- if (rdesc_len && !skb) {
+ if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
- rdesc_len);
- if (!skb)
+ buf1_len);
+ if (!skb) {
error = 1;
- } else if (rdesc_len) {
+ goto skip_data;
+ }
+ }
+
+ if (buf2_len) {
dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
@@ -2179,13 +2202,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
- rdesc_len,
+ buf2_len,
rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
}
- if (incomplete || context_next)
+skip_data:
+ if (!last || context_next)
goto read_again;
if (!skb)
@@ -2243,7 +2267,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
}
/* Check if we need to save state before leaving */
- if (received && (incomplete || context_next)) {
+ if (received && (!last || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1;
rdata->state.skb = skb;
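
The new xgbe_rx_buf1_len()/xgbe_rx_buf2_len() helpers above decide how much of the header buffer and of the data buffer each descriptor actually used, keyed off the FIRST/LAST attributes and the split-header length. Below is a simplified standalone restatement of that length math with a hypothetical struct; it is a sketch of the logic, not the driver code:

#include <stdbool.h>

struct rx_desc_info {
	bool     first, last;   /* FIRST/LAST packet attributes           */
	unsigned hdr_len;       /* split-header length, 0 if none         */
	unsigned hdr_buf_len;   /* size of the header DMA buffer          */
	unsigned data_buf_len;  /* size of the data DMA buffer            */
	unsigned pkt_len;       /* total packet length (valid on last)    */
};

static unsigned rx_buf1_len(const struct rx_desc_info *d)
{
	if (!d->first)
		return 0;                        /* header buffer unused      */
	if (d->hdr_len)
		return d->hdr_len;               /* split header present      */
	if (!d->last)
		return d->hdr_buf_len;           /* whole buffer consumed     */
	return d->pkt_len < d->hdr_buf_len ?     /* single-descriptor packet  */
	       d->pkt_len : d->hdr_buf_len;
}

static unsigned rx_buf2_len(const struct rx_desc_info *d, unsigned consumed)
{
	if (!d->last)
		return d->data_buf_len;          /* full data buffer used     */
	return d->pkt_len - consumed;            /* remainder of the packet   */
}

int main(void)
{
	struct rx_desc_info d = { .first = true, .last = true,
				  .hdr_len = 0, .hdr_buf_len = 256,
				  .data_buf_len = 2048, .pkt_len = 1514 };
	unsigned b1 = rx_buf1_len(&d);
	unsigned b2 = rx_buf2_len(&d, b1);
	/* b1 = 256 (header buffer filled), b2 = 1258 (rest in data buffer) */
	return !(b1 + b2 == d.pkt_len);
}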
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index dad6362..d05fbfd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -98,6 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
if (err < 0)
goto err_exit;
+ ndev->mtu = new_mtu;
if (netif_running(ndev)) {
aq_ndev_close(ndev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
index 1093ea1..0592a03 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
.tx_rings = HW_ATL_A0_TX_RINGS,
.rx_rings = HW_ATL_A0_RX_RINGS,
.hw_features = NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_TSO,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index 8bdee3d..f3957e93 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
.tx_rings = HW_ATL_B0_TX_RINGS,
.rx_rings = HW_ATL_B0_RX_RINGS,
.hw_features = NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_TSO |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 69015fa..365895e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3481,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
- phy_suspend(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_suspend(priv->phydev);
netif_device_detach(dev);
@@ -3578,7 +3579,8 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
- phy_resume(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_resume(priv->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e876076..2f92819 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
udelay(60);
}
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
-
- /* Power up PHY */
- bcmgenet_phy_power_set(dev, true);
- /* enable APD */
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_PWR_DN_EN_LD;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- bcmgenet_mii_reset(dev);
-}
-
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev)
if (priv->internal_phy) {
phy_name = "internal PHY";
- bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
phy_name = "MoCA";
bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 05c1c1d..cebfe3b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
- if (rc < 2) {
+ if (rc < 2 || len > UINT_MAX >> 2) {
netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
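
The added "len > UINT_MAX >> 2" test above rejects register-read requests whose word count, presumably converted to a byte size as len << 2 later in the handler, would wrap around a 32-bit value and lead to an undersized allocation. A small illustration of the arithmetic (hypothetical values, not from the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t len   = 0x40000001u;   /* exceeds UINT32_MAX >> 2            */
	uint32_t bytes = len << 2;      /* wraps around to 4 in 32 bits       */

	printf("words=%" PRIu32 " -> bytes=%" PRIu32 " after 32-bit wrap\n",
	       len, bytes);

	/* The debugfs fix refuses such a request before allocating: */
	if (len > UINT32_MAX >> 2)
		puts("rejected: byte count would overflow");
	return 0;
}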
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5f11b4d..b23d654 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1257,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
+ kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
}
@@ -1269,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
+ kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e8c1051..0e0fa70 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev)
rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
/* PCI might be offline */
+
+ /* If device removal has been requested,
+ * do not continue retrying.
+ */
+ if (dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_NOWAIT) {
+ mlx4_warn(dev,
+ "communication channel is offline\n");
+ return -EIO;
+ }
+
msleep(100);
wr_toggle = swab32(readl(&priv->mfunc.comm->
slave_write));
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 21377c3..7032054 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
(u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
if (!offline_bit)
return 0;
+
+ /* If device removal has been requested,
+ * do not continue retrying.
+ */
+ if (dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_NOWAIT)
+ break;
+
/* There are cases as part of AER/Reset flow that PF needs
* around 100 msec to load. We therefore sleep for 100 msec
* to allow other tasks to make use of that CPU during this
@@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
+ if (mlx4_is_slave(dev))
+ persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
+
mutex_lock(&persist->interface_state_mutex);
persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
mutex_unlock(&persist->interface_state_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index caa837e..a380353 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_SET_RATE_LIMIT:
+ case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_ALLOC_PD:
case MLX5_CMD_OP_ALLOC_UAR:
case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+ MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(ALLOC_PD);
MLX5_COMMAND_STR_CASE(DEALLOC_PD);
MLX5_COMMAND_STR_CASE(ALLOC_UAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f6a6ded..dc52053 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti);
int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8ef64c4..66c1337 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
-void mlx5e_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
-void mlx5e_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c86457..f621373 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
.ndo_get_stats64 = mlx5e_rep_get_stats,
- .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
- .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3d37168..bafcb34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ /* Subtract one since we already counted this as one
+ * "regular" packet in mlx5e_complete_rx_cqe()
+ */
+ rq->stats.packets += lro_num_seg - 1;
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 79481f4..fade723 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -133,6 +133,23 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return rule;
}
+static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_fc *counter = NULL;
+
+ if (!IS_ERR(flow->rule)) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
+
+ if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
+ }
+}
+
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
@@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow) {
+ struct mlx5e_tc_flow *flow);
+
+static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+
+ mlx5_eswitch_del_vlan_action(esw, flow->attr);
+
+ if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ mlx5e_detach_encap(priv, flow);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
struct list_head *next = flow->encap.next;
list_del(&flow->encap);
@@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_fc *counter = NULL;
-
- if (!IS_ERR(flow->rule)) {
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
- mlx5_fc_destroy(priv->mdev, counter);
- }
-
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- mlx5_eswitch_del_vlan_action(esw, flow->attr);
- if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
- mlx5e_detach_encap(priv, flow);
- }
-
- if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
- priv->fs.tc.t = NULL;
- }
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ mlx5e_tc_del_fdb_flow(priv, flow);
+ else
+ mlx5e_tc_del_nic_flow(priv, flow);
}
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_PORTS,
f->mask);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_priv *up_priv = netdev_priv(up_dev);
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
goto vxlan_match_offload_err;
- if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+ if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
else {
@@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ struct mlx5e_priv *up_priv = netdev_priv(up_dev);
unsigned short family = ip_tunnel_info_af(tun_info);
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5_encap_entry *e;
@@ -996,7 +1020,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+ if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
@@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
if (is_tcf_vlan(a)) {
- if (tcf_vlan_action(a) == VLAN_F_POP) {
+ if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+ } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
attr->vlan = tcf_vlan_push_vid(a);
+ } else { /* action is TCA_VLAN_ACT_MODIFY */
+ return -EOPNOTSUPP;
}
continue;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f193128..57f5e2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.tso_bytes += skb->len - ihs;
}
+ sq->stats.packets += skb_shinfo(skb)->gso_segs;
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
} else {
bf = sq->bf_budget &&
!skb->xmit_more &&
!skb_shinfo(skb)->nr_frags;
ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+ sq->stats.packets++;
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
+ sq->stats.bytes += num_bytes;
wi->num_bytes = num_bytes;
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (bf)
sq->bf_budget--;
- sq->stats.packets++;
- sq->stats.bytes += num_bytes;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
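
The en_tx.c hunk above moves the packet/byte accounting to submission time and counts a TSO skb as gso_segs packets, charging the on-wire byte count that includes one replicated inline header per extra segment. The arithmetic, restated with illustrative values only:

#include <stdio.h>

int main(void)
{
	unsigned skb_len  = 64000;   /* payload plus one header, as in the skb */
	unsigned hdr_len  = 54;      /* inline header size (ihs)               */
	unsigned gso_segs = 44;      /* segments the NIC will emit             */

	/* Each extra segment re-sends the header, so the wire byte count is: */
	unsigned wire_bytes = skb_len + (gso_segs - 1) * hdr_len;

	printf("packets += %u, bytes += %u\n", gso_segs, wire_bytes);
	return 0;
}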
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 5b78883..ad329b16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
struct mlx5_eswitch_rep *vport_reps;
DECLARE_HASHTABLE(encap_tbl, 8);
u8 inline_mode;
+ u64 num_flows;
};
struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr);
+
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4f5b0d4..307ec6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
spec, &flow_act, dest, i);
if (IS_ERR(rule))
mlx5_fc_destroy(esw->dev, counter);
+ else
+ esw->offloads.num_flows++;
return rule;
}
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_fc *counter = NULL;
+
+ if (!IS_ERR(rule)) {
+ counter = mlx5_flow_rule_counter(rule);
+ mlx5_del_flow_rules(rule);
+ mlx5_fc_destroy(esw->dev, counter);
+ esw->offloads.num_flows--;
+ }
+}
+
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
return -EOPNOTSUPP;
+ if (esw->offloads.num_flows > 0) {
+ esw_warn(dev, "Can't set inline mode when flows are configured\n");
+ return -EOPNOTSUPP;
+ }
+
err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e2bd600..60154a17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
[2] = {
.mask = MLX5_PROF_MASK_QP_SIZE |
MLX5_PROF_MASK_MR_CACHE,
- .log_max_qp = 17,
+ .log_max_qp = 18,
.mr_cache[0] = {
.size = 500,
.limit = 250
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 334bcc6..50d2826 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t
tnl.type = (u16)efx_tunnel_type;
tnl.port = ti->port;
- if (efx->type->udp_tnl_add_port)
+ if (efx->type->udp_tnl_del_port)
(void)efx->type->udp_tnl_del_port(efx, tnl);
}
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 296c8ef..9e63195 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -74,15 +74,21 @@
will be called cpsw.
config TI_CPTS
- tristate "TI Common Platform Time Sync (CPTS) Support"
+ bool "TI Common Platform Time Sync (CPTS) Support"
depends on TI_CPSW || TI_KEYSTONE_NETCP
- imply PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
---help---
This driver supports the Common Platform Time Sync unit of
the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
driver offers a PTP Hardware Clock.
+config TI_CPTS_MOD
+ tristate
+ depends on TI_CPTS
+ default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
+ default m
+
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
select TI_CPSW_ALE
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 1e7c10b..10e6b0c 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -12,7 +12,7 @@
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
-obj-$(CONFIG_TI_CPTS) += cpts.o
+obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
ti_cpsw-y := cpsw.o
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b75d9cd..ae48c80 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
+
static int fjes_request_irq(struct fjes_adapter *);
static void fjes_free_irq(struct fjes_adapter *);
@@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int);
static int fjes_poll(struct napi_struct *, int);
static const struct acpi_device_id fjes_acpi_ids[] = {
- {"PNP0C02", 0},
+ {ACPI_MOTHERBOARD_RESOURCE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
@@ -115,18 +117,17 @@ static struct resource fjes_resource[] = {
},
};
-static int fjes_acpi_add(struct acpi_device *device)
+static bool is_extended_socket_device(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
- struct platform_device *plat_dev;
union acpi_object *str;
acpi_status status;
int result;
status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return false;
str = buffer.pointer;
result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
@@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device)
if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
kfree(buffer.pointer);
- return -ENODEV;
+ return false;
}
kfree(buffer.pointer);
+ return true;
+}
+
+static int acpi_check_extended_socket_status(struct acpi_device *device)
+{
+ unsigned long long sta;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
+ (sta & ACPI_STA_DEVICE_ENABLED) &&
+ (sta & ACPI_STA_DEVICE_UI) &&
+ (sta & ACPI_STA_DEVICE_FUNCTIONING)))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+ struct platform_device *plat_dev;
+ acpi_status status;
+
+ if (!is_extended_socket_device(device))
+ return -ENODEV;
+
+ if (acpi_check_extended_socket_status(device))
+ return -ENODEV;
+
status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
fjes_get_acpi_resource, fjes_resource);
if (ACPI_FAILURE(status))
@@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
netdev->min_mtu = fjes_support_mtu[0];
netdev->max_mtu = fjes_support_mtu[3];
netdev->flags |= IFF_BROADCAST;
- netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static void fjes_irq_watch_task(struct work_struct *work)
@@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
}
+static acpi_status
+acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
+ void *context, void **return_value)
+{
+ struct acpi_device *device;
+ bool *found = context;
+ int result;
+
+ result = acpi_bus_get_device(obj_handle, &device);
+ if (result)
+ return AE_OK;
+
+ if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
+ return AE_OK;
+
+ if (!is_extended_socket_device(device))
+ return AE_OK;
+
+ if (acpi_check_extended_socket_status(device))
+ return AE_OK;
+
+ *found = true;
+ return AE_CTRL_TERMINATE;
+}
+
/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
+ bool found = false;
int result;
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ acpi_find_extended_socket_device, NULL, &found,
+ NULL);
+
+ if (!found)
+ return -ENODEV;
+
pr_info("%s - version %s - %s\n",
fjes_driver_string, fjes_driver_version, fjes_copyright);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4c1d8cc..8dd0b87 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1231,8 +1231,11 @@ void netvsc_channel_cb(void *context)
return;
net_device = net_device_to_netvsc_device(ndev);
- if (unlikely(net_device->destroy) &&
- netvsc_channel_idle(net_device, q_idx))
+ if (unlikely(!net_device))
+ return;
+
+ if (unlikely(net_device->destroy &&
+ netvsc_channel_idle(net_device, q_idx)))
return;
/* commit_rd_index() -> hv_signal_on_read() needs this. */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 34cc3c5..cc88cd7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1931,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
return -EINVAL;
tun->set_features = features;
+ tun->dev->wanted_features &= ~TUN_USER_FEATURES;
+ tun->dev->wanted_features |= features;
netdev_update_features(tun->dev);
return 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8056745..156f7f8 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -580,6 +580,10 @@ static const struct usb_device_id products[] = {
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Motorola Mapphone devices with MDM6600 */
+ USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
/* 2. Combined interface devices matching on class+protocol */
{ /* Huawei E367 and possibly others in "Windows mode" */
@@ -925,6 +929,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 986243c..0b1b918 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "8"
+#define NET_VERSION "9"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -501,6 +501,8 @@ enum rtl_register_content {
#define RTL8153_RMS RTL8153_MAX_PACKET
#define RTL8152_TX_TIMEOUT (5 * HZ)
#define RTL8152_NAPI_WEIGHT 64
+#define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \
+ sizeof(struct rx_desc) + RX_ALIGN)
/* rtl8152 flags */
enum rtl8152_flags {
@@ -1362,6 +1364,7 @@ static int alloc_all_mem(struct r8152 *tp)
spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock);
INIT_LIST_HEAD(&tp->tx_free);
+ INIT_LIST_HEAD(&tp->rx_done);
skb_queue_head_init(&tp->tx_queue);
skb_queue_head_init(&tp->rx_queue);
@@ -2252,8 +2255,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
- u32 mtu = tp->netdev->mtu;
- u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
+ u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
@@ -2898,7 +2900,8 @@ static void r8153_first_init(struct r8152 *tp)
rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+ ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
@@ -2950,7 +2953,8 @@ static void r8153_enter_oob(struct r8152 *tp)
usleep_range(1000, 2000);
}
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+ ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
ocp_data &= ~TEREDO_WAKE_MASK;
@@ -4200,8 +4204,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
- if (netif_running(dev) && netif_carrier_ok(dev))
- r8153_set_rx_early_size(tp);
+ if (netif_running(dev)) {
+ u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE;
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms);
+
+ if (netif_carrier_ok(dev))
+ r8153_set_rx_early_size(tp);
+ }
mutex_unlock(&tp->control);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index fea687f..d6988db 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -462,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
}
if (rt6_local) {
- if (rt6_local->rt6i_idev)
+ if (rt6_local->rt6i_idev) {
in6_dev_put(rt6_local->rt6i_idev);
+ rt6_local->rt6i_idev = NULL;
+ }
dst = &rt6_local->dst;
dev_put(dst->dev);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 33fb268..d9f37ee 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = {
.rtc_soc_base_address = 0x00000800,
.rtc_wmac_base_address = 0x00001000,
.soc_core_base_address = 0x0003a000,
- .wlan_mac_base_address = 0x00020000,
+ .wlan_mac_base_address = 0x00010000,
.ce_wrapper_base_address = 0x00034000,
.ce0_base_address = 0x00034400,
.ce1_base_address = 0x00034800,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d37b169..6927cae 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* Called when we need to transmit (a) frame(s) from agg queue */
+ /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
tids, more_data, true);
@@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
- if (tid_data->state != IWL_AGG_ON &&
+ if (!iwl_mvm_is_dqa_supported(mvm) &&
+ tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
continue;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index bd1dcc8..b51a285 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3135,7 +3135,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
u16 cnt, u16 tids, bool more_data,
- bool agg)
+ bool single_sta_queue)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {
@@ -3155,14 +3155,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
- /* If we're releasing frames from aggregation queues then check if the
- * all queues combined that we're releasing frames from have
+ /* If we're releasing frames from aggregation or dqa queues then check
+ * if all the queues that we're releasing frames from, combined, have:
* - more frames than the service period, in which case more_data
* needs to be set
* - fewer than 'cnt' frames, in which case we need to adjust the
* firmware command (but do that unconditionally)
*/
- if (agg) {
+ if (single_sta_queue) {
int remaining = cnt;
int sleep_tx_count;
@@ -3172,7 +3172,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
u16 n_queued;
tid_data = &mvmsta->tid_data[tid];
- if (WARN(tid_data->state != IWL_AGG_ON &&
+ if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
+ tid_data->state != IWL_AGG_ON &&
tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
"TID %d state is %d\n",
tid, tid_data->state)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4be34f9..1927ce6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
u16 cnt, u16 tids, bool more_data,
- bool agg);
+ bool single_sta_queue);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain);
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dd2b4a3..3f37075 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -628,8 +629,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* values.
* Note that we don't need to make sure it isn't agg'd, since we're
* TXing non-sta
+ * For DQA mode - we shouldn't increase it though
*/
- atomic_inc(&mvm->pending_frames[sta_id]);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ atomic_inc(&mvm->pending_frames[sta_id]);
return 0;
}
@@ -1005,11 +1008,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_unlock(&mvmsta->lock);
- /* Increase pending frames count if this isn't AMPDU */
- if ((iwl_mvm_is_dqa_supported(mvm) &&
- mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
- mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
- (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
+ /* Increase pending frames count if this isn't AMPDU or DQA queue */
+ if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -1079,12 +1079,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
if ((tid_data->state == IWL_AGG_ON ||
- tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
+ iwl_mvm_is_dqa_supported(mvm)) &&
iwl_mvm_tid_queued(tid_data) == 0) {
/*
- * Now that this aggregation queue is empty tell mac80211 so it
- * knows we no longer have frames buffered for the station on
- * this TID (for the TIM bitmap calculation.)
+ * Now that this aggregation or DQA queue is empty tell
+ * mac80211 so it knows we no longer have frames buffered for
+ * the station on this TID (for the TIM bitmap calculation.)
*/
ieee80211_sta_set_buffered(sta, tid, false);
}
@@ -1257,7 +1258,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl;
bool is_ndp = false;
- bool txq_agg = false; /* Is this TXQ aggregated */
__skb_queue_head_init(&skbs);
@@ -1283,6 +1283,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
info->flags |= IEEE80211_TX_STAT_ACK;
break;
case TX_STATUS_FAIL_DEST_PS:
+ /* In DQA, the FW should have stopped the queue and not
+ * return this status
+ */
+ WARN_ON(iwl_mvm_is_dqa_supported(mvm));
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
break;
default:
@@ -1387,15 +1391,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
- if (iwl_mvm_is_dqa_supported(mvm)) {
- enum iwl_mvm_agg_state state;
-
- state = mvmsta->tid_data[tid].state;
- txq_agg = (state == IWL_AGG_ON ||
- state == IWL_EMPTYING_HW_QUEUE_DELBA);
- } else {
- txq_agg = txq_id >= mvm->first_agg_queue;
- }
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
@@ -1452,11 +1447,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_agg)
+ if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
goto out;
/* We can't free more than one frame at once on a shared queue */
- WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
+ WARN_ON(skb_freed > 1);
/* If we have still frames for this STA nothing to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 5ebca1d..b62e03d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
* In case of any errors during inittialization, this function also ensures
* proper cleanup before exiting.
*/
-static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
- void **padapter)
+static int mwifiex_register(void *card, struct device *dev,
+ struct mwifiex_if_ops *if_ops, void **padapter)
{
struct mwifiex_adapter *adapter;
int i;
@@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
return -ENOMEM;
*padapter = adapter;
+ adapter->dev = dev;
adapter->card = card;
/* Save interface specific operations in adapter */
@@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done,
{
struct mwifiex_adapter *adapter;
- if (mwifiex_register(card, if_ops, (void **)&adapter)) {
+ if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) {
pr_err("%s: software init failed\n", __func__);
goto err_init_sw;
}
- adapter->dev = dev;
mwifiex_probe_of(adapter);
adapter->iface_type = iface_type;
@@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
+ if (adapter->irq_wakeup >= 0)
+ device_init_wakeup(adapter->dev, false);
+
/* Unregister device */
mwifiex_dbg(adapter, INFO,
"info: unregister device\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index a0d9180..b8c990d 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
schedule_work(&card->work);
}
+static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ if (reg->sleep_cookie)
+ mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+ mwifiex_pcie_delete_txbd_ring(adapter);
+ card->cmdrsp_buf = NULL;
+}
+
/*
* This function initializes the PCI-E host memory space, WCB rings, etc.
*
@@ -2850,13 +2865,6 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
/*
* This function cleans up the allocated card buffers.
- *
- * The following are freed by this function -
- * - TXBD ring buffers
- * - RXBD ring buffers
- * - Event BD ring buffers
- * - Command response ring buffer
- * - Sleep cookie buffer
*/
static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
{
@@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
"Failed to write driver not-ready signature\n");
}
+ mwifiex_pcie_free_buffers(adapter);
+
if (pdev) {
pci_iounmap(pdev, card->pci_mmap);
pci_iounmap(pdev, card->pci_mmap1);
@@ -3126,10 +3136,7 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
pci_iounmap(pdev, card->pci_mmap1);
}
-/* This function cleans up the PCI-E host memory space.
- * Some code is extracted from mwifiex_unregister_dev()
- *
- */
+/* This function cleans up the PCI-E host memory space. */
static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
@@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
adapter->seq_num = 0;
- if (reg->sleep_cookie)
- mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
- mwifiex_pcie_delete_cmdrsp_buf(adapter);
- mwifiex_pcie_delete_evtbd_ring(adapter);
- mwifiex_pcie_delete_rxbd_ring(adapter);
- mwifiex_pcie_delete_txbd_ring(adapter);
- card->cmdrsp_buf = NULL;
+ mwifiex_pcie_free_buffers(adapter);
}
static struct mwifiex_if_ops pcie_ops = {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 779f516..47a479f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
struct ib_device *ibdev = dev->dev;
int ret;
- BUG_ON(queue_idx >= ctrl->queue_count);
-
ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
if (ret)
@@ -652,8 +650,22 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
int i, ret;
+ nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret)
+ return ret;
+
+ ctrl->queue_count = nr_io_queues + 1;
+ if (ctrl->queue_count < 2)
+ return 0;
+
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", nr_io_queues);
+
for (i = 1; i < ctrl->queue_count; i++) {
ret = nvme_rdma_init_queue(ctrl, i,
ctrl->ctrl.opts->queue_size);
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
{
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
int ret;
- ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
- if (ret)
- return ret;
-
- ctrl->queue_count = opts->nr_io_queues + 1;
- if (ctrl->queue_count < 2)
- return 0;
-
- dev_info(ctrl->ctrl.device,
- "creating %d I/O queues.\n", opts->nr_io_queues);
-
ret = nvme_rdma_init_io_queues(ctrl);
if (ret)
return ret;
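
The RDMA host change above (and the loop target further below) clamps the requested I/O queue count to the number of online CPUs and to whatever the controller grants via nvme_set_queue_count() before sizing queue_count, which also includes the admin queue. A compact restatement of that negotiation; controller_grant() is a made-up stand-in for the Set Features round trip:

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/* Stand-in for the fabrics Set Features exchange: the controller may
 * grant fewer queues than requested. */
static unsigned controller_grant(unsigned requested) { return min_u(requested, 8); }

int main(void)
{
	unsigned requested   = 32;   /* opts->nr_io_queues                    */
	unsigned online_cpus = 16;   /* num_online_cpus()                     */
	unsigned nr_io       = controller_grant(min_u(requested, online_cpus));
	unsigned queue_count = nr_io + 1;   /* +1 for the admin queue         */

	printf("creating %u I/O queues (queue_count=%u)\n", nr_io, queue_count);
	return 0;
}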
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 11b0a0a..798653b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
ctrl->sqs[qid] = sq;
}
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+ struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+ complete(&sq->confirm_done);
+}
+
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
/*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
*/
if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
nvmet_async_events_free(sq->ctrl);
- percpu_ref_kill(&sq->ref);
+ percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+ wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
return ret;
}
init_completion(&sq->free_done);
+ init_completion(&sq->confirm_done);
return 0;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d1f06e7..22f7bc6 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
struct nvme_loop_iod *iod, unsigned int queue_idx)
{
- BUG_ON(queue_idx >= ctrl->queue_count);
-
iod->req.cmd = &iod->cmd;
iod->req.rsp = &iod->rsp;
iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
- nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
kfree(ctrl);
}
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ unsigned int nr_io_queues;
+ int ret, i;
+
+ nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+ ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+ if (ret || !nr_io_queues)
+ return ret;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+ for (i = 1; i <= nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->queue_count++;
+ }
+
+ return 0;
+
+out_destroy_queues:
+ nvme_loop_destroy_io_queues(ctrl);
+ return ret;
+}
+
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
@@ -385,17 +420,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
- int i;
-
nvme_stop_keep_alive(&ctrl->ctrl);
if (ctrl->queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
-
- for (i = 1; i < ctrl->queue_count; i++)
- nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvme_loop_destroy_io_queues(ctrl);
}
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
if (ret)
goto out_disable;
- for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
- ctrl->queues[i].ctrl = ctrl;
- ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
- if (ret)
- goto out_free_queues;
+ ret = nvme_loop_init_io_queues(ctrl);
+ if (ret)
+ goto out_destroy_admin;
- ctrl->queue_count++;
- }
-
- for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+ for (i = 1; i < ctrl->queue_count; i++) {
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
- goto out_free_queues;
+ goto out_destroy_io;
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
return;
-out_free_queues:
- for (i = 1; i < ctrl->queue_count; i++)
- nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+ nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
nvme_loop_destroy_admin_queue(ctrl);
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
- struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
int ret, i;
- ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
- if (ret || !opts->nr_io_queues)
+ ret = nvme_loop_init_io_queues(ctrl);
+ if (ret)
return ret;
- dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
- opts->nr_io_queues);
-
- for (i = 1; i <= opts->nr_io_queues; i++) {
- ctrl->queues[i].ctrl = ctrl;
- ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
- if (ret)
- goto out_destroy_queues;
-
- ctrl->queue_count++;
- }
-
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
ctrl->tag_set.ops = &nvme_loop_mq_ops;
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
goto out_free_tagset;
}
- for (i = 1; i <= opts->nr_io_queues; i++) {
+ for (i = 1; i < ctrl->queue_count; i++) {
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
out_free_tagset:
blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
- for (i = 1; i < ctrl->queue_count; i++)
- nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvme_loop_destroy_io_queues(ctrl);
return ret;
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1370eee..f7ff15f 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
u16 qid;
u16 size;
struct completion free_done;
+ struct completion confirm_done;
};
/**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9aa1da3..ecc4fe8 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
{
u16 status;
- cmd->queue = queue;
- cmd->n_rdma = 0;
- cmd->req.port = queue->port;
-
-
ib_dma_sync_single_for_cpu(queue->dev->device,
cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cmd->queue = queue;
rsp = nvmet_rdma_get_rsp(queue);
+ rsp->queue = queue;
rsp->cmd = cmd;
rsp->flags = 0;
rsp->req.cmd = cmd->nvme_cmd;
+ rsp->req.port = queue->port;
+ rsp->n_rdma = 0;
if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
unsigned long flags;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index bc090da..5dc53d4 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name,
* pardevice fields. -arca
*/
port->ops->init_state(par_dev, par_dev->state);
- port->proc_device = par_dev;
- parport_device_proc_register(par_dev);
+ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+ port->proc_device = par_dev;
+ parport_device_proc_register(par_dev);
+ }
return par_dev;
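
The parport hunk above turns /proc registration into a first-caller-wins operation guarded by test_and_set_bit(), so a port with several devices only ever registers one proc entry. A minimal userspace sketch of the same pattern using C11 atomics (illustrative only; the names are made up, not taken from the driver):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for port->devflags: only the first caller that flips the
 * flag performs the one-time registration. */
static atomic_flag proc_registered = ATOMIC_FLAG_INIT;

static void register_proc_entry(const char *name)
{
	/* atomic_flag_test_and_set() returns the previous value, so it
	 * is false exactly once -- for the first device on the port. */
	if (!atomic_flag_test_and_set(&proc_registered))
		printf("registering proc entry for %s\n", name);
	else
		printf("%s: proc entry already owned by an earlier device\n", name);
}

int main(void)
{
	register_proc_entry("lp0");    /* wins, registers            */
	register_proc_entry("ppdev0"); /* sees the bit set, skips it */
	return 0;
}
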
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index dc5277a..005cadb 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -449,6 +449,7 @@
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
+ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
select GENERIC_PHY
help
Support for the USB high-speed ULPI compliant phy on Qualcomm
@@ -510,12 +511,4 @@
and GXBB SoCs.
If unsure, say N.
-config PHY_NSP_USB3
- tristate "Broadcom NorthStar plus USB3 PHY driver"
- depends on OF && (ARCH_BCM_NSP || COMPILE_TEST)
- select GENERIC_PHY
- default ARCH_BCM_NSP
- help
- Enable this to support the Broadcom Northstar plus USB3 PHY.
- If unsure, say N.
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index e7b0feb..dd8f3b5 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -62,4 +62,3 @@
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
-obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o
diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c
deleted file mode 100644
index 49024ea..0000000
--- a/drivers/phy/phy-bcm-nsp-usb3.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mdio.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-
-#define NSP_USB3_RST_CTRL_OFFSET 0x3f8
-
-/* mdio reg access */
-#define NSP_USB3_PHY_BASE_ADDR_REG 0x1f
-
-#define NSP_USB3_PHY_PLL30_BLOCK 0x8000
-#define NSP_USB3_PLL_CONTROL 0x01
-#define NSP_USB3_PLLA_CONTROL0 0x0a
-#define NSP_USB3_PLLA_CONTROL1 0x0b
-
-#define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040
-#define NSP_USB3_TX_PMD_CONTROL1 0x01
-
-#define NSP_USB3_PHY_PIPE_BLOCK 0x8060
-#define NSP_USB3_LFPS_CMP 0x02
-#define NSP_USB3_LFPS_DEGLITCH 0x03
-
-struct nsp_usb3_phy {
- struct regmap *usb3_ctrl;
- struct phy *phy;
- struct mdio_device *mdiodev;
-};
-
-static int nsp_usb3_phy_init(struct phy *phy)
-{
- struct nsp_usb3_phy *iphy = phy_get_drvdata(phy);
- struct mii_bus *bus = iphy->mdiodev->bus;
- int addr = iphy->mdiodev->addr;
- u32 data;
- int rc;
-
- rc = regmap_read(iphy->usb3_ctrl, 0, &data);
- if (rc)
- return rc;
- data |= 1;
- rc = regmap_write(iphy->usb3_ctrl, 0, data);
- if (rc)
- return rc;
-
- rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
- NSP_USB3_PHY_PLL30_BLOCK);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000);
- if (rc)
- return rc;
-
- rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
- NSP_USB3_PHY_PIPE_BLOCK);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG,
- NSP_USB3_PHY_TX_PMD_BLOCK);
- if (rc)
- return rc;
-
- rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003);
-
- return rc;
-}
-
-static struct phy_ops nsp_usb3_phy_ops = {
- .init = nsp_usb3_phy_init,
- .owner = THIS_MODULE,
-};
-
-static int nsp_usb3_phy_probe(struct mdio_device *mdiodev)
-{
- struct device *dev = &mdiodev->dev;
- struct phy_provider *provider;
- struct nsp_usb3_phy *iphy;
-
- iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL);
- if (!iphy)
- return -ENOMEM;
- iphy->mdiodev = mdiodev;
-
- iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node,
- "usb3-ctrl-syscon");
- if (IS_ERR(iphy->usb3_ctrl))
- return PTR_ERR(iphy->usb3_ctrl);
-
- iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops);
- if (IS_ERR(iphy->phy)) {
- dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(iphy->phy);
- }
-
- phy_set_drvdata(iphy->phy, iphy);
-
- provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(provider)) {
- dev_err(dev, "could not register PHY provider\n");
- return PTR_ERR(provider);
- }
-
- return 0;
-}
-
-static const struct of_device_id nsp_usb3_phy_of_match[] = {
- {.compatible = "brcm,nsp-usb3-phy",},
- { /* sentinel */ }
-};
-
-static struct mdio_driver nsp_usb3_phy_driver = {
- .mdiodrv = {
- .driver = {
- .name = "nsp-usb3-phy",
- .of_match_table = nsp_usb3_phy_of_match,
- },
- },
- .probe = nsp_usb3_phy_probe,
-};
-
-mdio_module_driver(nsp_usb3_phy_driver);
-
-MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com");
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c
index 4f60b83..60baf25 100644
--- a/drivers/phy/phy-exynos-pcie.c
+++ b/drivers/phy/phy-exynos-pcie.c
@@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
exynos_phy->blk_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(exynos_phy->phy_base))
- return PTR_ERR(exynos_phy->phy_base);
+ if (IS_ERR(exynos_phy->blk_base))
+ return PTR_ERR(exynos_phy->blk_base);
exynos_phy->drv_data = drv_data;
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7671424..31a3a98 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = {
};
static const char * const i2c_ao_groups[] = {
- "i2c_sdk_ao", "i2c_sda_ao",
+ "i2c_sck_ao", "i2c_sda_ao",
};
static const char * const i2c_slave_ao_groups[] = {
- "i2c_slave_sdk_ao", "i2c_slave_sda_ao",
+ "i2c_slave_sck_ao", "i2c_slave_sda_ao",
};
static const char * const remote_input_ao_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 676efcc..3ae8066 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
}
+static int st_gpio_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ st_gpio_direction_input(gc, d->hwirq);
+
+ return gpiochip_lock_as_irq(gc, d->hwirq);
+}
+
+static void st_gpio_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_unlock_as_irq(gc, d->hwirq);
+}
+
static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = {
};
static struct irq_chip st_gpio_irqchip = {
- .name = "GPIO",
- .irq_disable = st_gpio_irq_mask,
- .irq_mask = st_gpio_irq_mask,
- .irq_unmask = st_gpio_irq_unmask,
- .irq_set_type = st_gpio_irq_set_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .name = "GPIO",
+ .irq_request_resources = st_gpio_irq_request_resources,
+ .irq_release_resources = st_gpio_irq_release_resources,
+ .irq_disable = st_gpio_irq_mask,
+ .irq_mask = st_gpio_irq_mask,
+ .irq_unmask = st_gpio_irq_unmask,
+ .irq_set_type = st_gpio_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static int st_gpiolib_register_bank(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index b68ae42..743d1f4 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = {
PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
};
static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index c978be5..273badd 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_status_reg);
- val &= ~BIT(g->intr_status_bit);
- writel(val, pctrl->regs + g->intr_status_reg);
-
val = readl(pctrl->regs + g->intr_cfg_reg);
val |= BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f9ddba7..d7aa22c 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
for (i = 0; i < ctrl->nr_ext_resources + 1; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- virt_base[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(virt_base[i]))
- return ERR_CAST(virt_base[i]);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get mem%d resource\n", i);
+ return ERR_PTR(-EINVAL);
+ }
+ virt_base[i] = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!virt_base[i]) {
+ dev_err(&pdev->dev, "failed to ioremap %pR\n", res);
+ return ERR_PTR(-EIO);
+ }
}
bank = d->pin_banks;
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig
index 815a886..5420770 100644
--- a/drivers/pinctrl/ti/Kconfig
+++ b/drivers/pinctrl/ti/Kconfig
@@ -1,6 +1,6 @@
config PINCTRL_TI_IODELAY
tristate "TI IODelay Module pinconf driver"
- depends on OF
+ depends on OF && (SOC_DRA7XX || COMPILE_TEST)
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 09b4df7..bb86569 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void)
kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL);
- if (IS_ERR(kvm_ptp_clock.ptp_clock))
- return PTR_ERR(kvm_ptp_clock.ptp_clock);
-
- return 0;
+ return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock);
}
module_init(ptp_kvm_init);
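
The ptp_kvm change above is the common PTR_ERR_OR_ZERO() cleanup: when an __init function only needs to turn an ERR_PTR into zero-or-negative-errno, the helper replaces the explicit IS_ERR()/PTR_ERR() branch. A simplified, self-contained userspace copy of the encoding, for illustration only (the real helpers live in include/linux/err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified userspace copies of the kernel's error-pointer helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0;
}

int main(void)
{
	int obj = 0;
	void *ok  = &obj;
	void *bad = ERR_PTR(-ENODEV);

	/* 0 on success, negative errno on failure -- exactly the return
	 * convention an __init function wants. */
	printf("ok  -> %d\n", PTR_ERR_OR_ZERO(ok));   /* 0   */
	printf("bad -> %d\n", PTR_ERR_OR_ZERO(bad));  /* -19 */
	return 0;
}
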
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 65f86bc..1dc43fc 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -76,7 +76,7 @@
depends on OF && ARCH_QCOM
depends on REMOTEPROC
depends on QCOM_SMEM
- depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_RPROC_COMMON
@@ -93,7 +93,7 @@
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
depends on REMOTEPROC
- depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
select MFD_SYSCON
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -104,7 +104,7 @@
config QCOM_WCNSS_PIL
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
- depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n)
depends on QCOM_SMEM
depends on REMOTEPROC
select QCOM_MDT_LOADER
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4bf55b5..3c52867 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1253,20 +1253,6 @@
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
-config LPFC_NVME_INITIATOR
- bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
- depends on SCSI_LPFC && NVME_FC
- ---help---
- This enables NVME Initiator support in the Emulex lpfc driver.
-
-config LPFC_NVME_TARGET
- bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
- depends on SCSI_LPFC && NVME_TARGET_FC
- ---help---
- This enables NVME Target support in the Emulex lpfc driver.
- Target enablement must still be enabled on a per adapter
- basis by module parameters.
-
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 524a0c7..0d0be77 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
/* fill_cmd can't fail here, no data buffer to map. */
(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
scsi3addr, TYPE_MSG);
- rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to send reset command\n");
goto out;
@@ -3714,7 +3714,7 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
* # (integer code indicating one of several NOT READY states
* describing why a volume is to be kept offline)
*/
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
unsigned char scsi3addr[])
{
struct CommandList *c;
@@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
DEFAULT_TIMEOUT);
if (rc) {
cmd_free(h, c);
- return 0;
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
sense = c->err_info->SenseInfo;
if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
cmd_status = c->err_info->CommandStatus;
scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
- /* Is the volume 'not ready'? */
- if (cmd_status != CMD_TARGET_STATUS ||
- scsi_status != SAM_STAT_CHECK_CONDITION ||
- sense_key != NOT_READY ||
- asc != ASC_LUN_NOT_READY) {
- return 0;
- }
/* Determine the reason for not ready state */
ldstat = hpsa_get_volume_status(h, scsi3addr);
/* Keep volume offline in certain cases: */
switch (ldstat) {
+ case HPSA_LV_FAILED:
case HPSA_LV_UNDERGOING_ERASE:
case HPSA_LV_NOT_AVAILABLE:
case HPSA_LV_UNDERGOING_RPI:
@@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
default:
break;
}
- return 0;
+ return HPSA_LV_OK;
}
/*
@@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
/* Do an inquiry to the device to see what it is. */
if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
- /* Inquiry failed (msg printed already) */
dev_err(&h->pdev->dev,
- "hpsa_update_device_info: inquiry failed\n");
- rc = -EIO;
+ "%s: inquiry failed, device will be skipped.\n",
+ __func__);
+ rc = HPSA_INQUIRY_FAILED;
goto bail_out;
}
@@ -3885,15 +3879,19 @@ static int hpsa_update_device_info(struct ctlr_info *h,
if ((this_device->devtype == TYPE_DISK ||
this_device->devtype == TYPE_ZBC) &&
is_logical_dev_addr_mode(scsi3addr)) {
- int volume_offline;
+ unsigned char volume_offline;
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
hpsa_get_ioaccel_status(h, scsi3addr, this_device);
volume_offline = hpsa_volume_offline(h, scsi3addr);
- if (volume_offline < 0 || volume_offline > 0xff)
- volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
- this_device->volume_offline = volume_offline & 0xff;
+ if (volume_offline == HPSA_LV_FAILED) {
+ rc = HPSA_LV_FAILED;
+ dev_err(&h->pdev->dev,
+ "%s: LV failed, device will be skipped.\n",
+ __func__);
+ goto bail_out;
+ }
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
@@ -4379,8 +4377,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
goto out;
}
if (rc) {
- dev_warn(&h->pdev->dev,
- "Inquiry failed, skipping device.\n");
+ h->drv_req_rescan = 1;
continue;
}
@@ -5558,7 +5555,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
spin_lock_irqsave(&h->scan_lock, flags);
h->scan_finished = 1;
- wake_up_all(&h->scan_wait_queue);
+ wake_up(&h->scan_wait_queue);
spin_unlock_irqrestore(&h->scan_lock, flags);
}
@@ -5576,11 +5573,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
+ /*
+ * If a scan is already waiting to run, no need to add another
+ */
+ spin_lock_irqsave(&h->scan_lock, flags);
+ if (h->scan_waiting) {
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
if (h->scan_finished)
break;
+ h->scan_waiting = 1;
spin_unlock_irqrestore(&h->scan_lock, flags);
wait_event(h->scan_wait_queue, h->scan_finished);
/* Note: We don't need to worry about a race between this
@@ -5590,6 +5599,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
*/
}
h->scan_finished = 0; /* mark scan as in progress */
+ h->scan_waiting = 0;
spin_unlock_irqrestore(&h->scan_lock, flags);
if (unlikely(lockup_detected(h)))
@@ -8792,6 +8802,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_waitqueue_head(&h->event_sync_wait_queue);
mutex_init(&h->reset_mutex);
h->scan_finished = 1; /* no scan currently in progress */
+ h->scan_waiting = 0;
pci_set_drvdata(pdev, h);
h->ndevices = 0;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index bf6cdc1..6f04f2a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -201,6 +201,7 @@ struct ctlr_info {
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
int scan_finished;
+ u8 scan_waiting : 1;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
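
The hpsa changes above add a scan_waiting bit so that while one rescan runs and a second request is already queued behind it, further hpsa_scan_start() callers bail out instead of stacking up on scan_wait_queue. A rough pthread analogue of that coalescing logic, sketched with hypothetical names (not the driver's code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  scan_done = PTHREAD_COND_INITIALIZER;
static int scan_finished = 1;	/* no scan currently in progress */
static int scan_waiting;	/* one follow-up scan already queued */

static void scan_start(void)
{
	pthread_mutex_lock(&scan_lock);
	if (scan_waiting) {		/* a rescan is already pending: coalesce */
		pthread_mutex_unlock(&scan_lock);
		return;
	}
	scan_waiting = 1;
	while (!scan_finished)		/* wait for the running scan to finish */
		pthread_cond_wait(&scan_done, &scan_lock);
	scan_finished = 0;		/* mark our scan as in progress */
	scan_waiting = 0;
	pthread_mutex_unlock(&scan_lock);

	printf("rescanning devices...\n");

	pthread_mutex_lock(&scan_lock);
	scan_finished = 1;
	pthread_cond_broadcast(&scan_done);
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	scan_start();
	return 0;
}
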
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a584cdf..5961705 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -156,6 +156,7 @@
#define CFGTBL_BusType_Fibre2G 0x00000200l
/* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED 0x02
#define HPSA_VPD_SUPPORTED_PAGES 0x00
#define HPSA_VPD_LV_DEVICE_ID 0x83
#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
@@ -166,6 +167,7 @@
/* Logical volume states */
#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
#define HPSA_LV_OK 0x0
+#define HPSA_LV_FAILED 0x01
#define HPSA_LV_NOT_AVAILABLE 0x0b
#define HPSA_LV_UNDERGOING_ERASE 0x0F
#define HPSA_LV_UNDERGOING_RPI 0x12
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c3be3e6..22819af 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3315,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
* 3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 3
+ * Supported values are [1,3]. Default value is 1
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
"Define fc4 type to register with fabric.");
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2697d49..6cc561b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5891,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Check to see if it matches any module parameter */
for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
if (wwn == lpfc_enable_nvmet[i]) {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6017 NVME Target %016llx\n",
wwn);
phba->nvmet_support = 1; /* a match */
+#else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6021 Can't enable NVME Target."
+ " NVME_TARGET_FC infrastructure"
+ " is not in kernel\n");
+#endif
}
}
}
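
The lpfc hunks in this series replace the driver-private CONFIG_LPFC_NVME_* symbols with IS_ENABLED(CONFIG_NVME_FC) / IS_ENABLED(CONFIG_NVME_TARGET_FC) checks, so the code keys off the NVMe infrastructure actually present in the kernel. The preprocessor trick behind IS_ENABLED() is easy to miss; below is a simplified, self-contained re-implementation for illustration (the real macro in include/linux/kconfig.h additionally recognises =m via the _MODULE suffix):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_NVME_TARGET_FC 1		/* pretend the option is built in */

int main(void)
{
	/* Expands to the constant 1 or 0, so it also works inside #if. */
	if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
		printf("NVME target infrastructure compiled in\n");
	else
		printf("NVME target infrastructure absent\n");
	return 0;
}
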
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 0a4c190..0024de1 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2149,7 +2149,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
&vport->phba->pcidev->dev, &localport);
#else
@@ -2190,7 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2274,7 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
int ret = 0;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
@@ -2403,7 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
-#ifdef CONFIG_LPFC_NVME_INITIATOR
+#if (IS_ENABLED(CONFIG_NVME_FC))
int ret;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b7739a5..7ca868f 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -671,7 +671,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
&phba->pcidev->dev,
&phba->targetport);
@@ -756,7 +756,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_tgtport *tgtp;
if (phba->nvmet_support == 0)
@@ -788,7 +788,7 @@ static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct hbq_dmabuf *nvmebuf)
{
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -891,7 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct rqb_dmabuf *nvmebuf,
uint64_t isr_timestamp)
{
-#ifdef CONFIG_LPFC_NVME_TARGET
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e7e5974..2b209bb 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.701.16.00-rc1"
-#define MEGASAS_RELDATE "February 2, 2017"
+#define MEGASAS_VERSION "07.701.17.00-rc1"
+#define MEGASAS_RELDATE "March 2, 2017"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7ac9a9e..0016f12c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1963,6 +1963,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
if (!mr_device_priv_data)
return -ENOMEM;
sdev->hostdata = mr_device_priv_data;
+
+ atomic_set(&mr_device_priv_data->r1_ldio_hint,
+ instance->r1_ldio_hint_default);
return 0;
}
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
&instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
- if (is_probe)
+ if (is_probe) {
+ pci_free_irq_vectors(instance->pdev);
return megasas_setup_irqs_ioapic(instance);
- else
+ } else {
return -1;
+ }
}
}
return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
MPI2_REPLY_POST_HOST_INDEX_OFFSET);
}
- i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
- if (i < 0)
- goto fail_setup_irqs;
+ if (!instance->msix_vectors) {
+ i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (i < 0)
+ goto fail_setup_irqs;
+ }
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 29650ba..f990ab4d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
cpu_sel = MR_RAID_CTX_CPUSEL_1;
if (is_stream_detected(rctx_g35) &&
- (raid->level == 5) &&
+ ((raid->level == 5) || (raid->level == 6)) &&
(raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
(cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
} else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
- atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+ (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 67c0d5a..de95293 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -3,6 +3,7 @@
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
select FW_LOADER
+ select BTREE
---help---
This qla2xxx driver supports all QLogic Fibre Channel
PCI and PCIe host adapters.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f610103..435ff7f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
- BUG_ON(atomic_read(&vha->vref_count));
-
qla2x00_free_fcports(vha);
mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
- if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
"Queue Pair delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1fc4e6..c6bffe9 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 625d438..ae11901 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
+#include <linux/btree.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
struct completion comp;
} abt;
struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[28]; /* fr fw */
- __le16 out_mb[28]; /* to fw */
+ __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
+ struct completion comp;
+ int rc;
} mbx;
struct {
struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
uint32_t handle;
uint16_t flags;
uint16_t type;
- char *name;
+ const char *name;
int iocbs;
struct qla_qpair *qpair;
u32 gen1; /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long plogi_nack_done_deadline;
+
u32 login_gen, last_login_gen;
u32 rscn_gen, last_rscn_gen;
u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
+struct qla_dif_statistics {
+ uint64_t dif_input_bytes;
+ uint64_t dif_output_bytes;
+ uint64_t dif_input_requests;
+ uint64_t dif_output_requests;
+ uint32_t dif_guard_err;
+ uint32_t dif_ref_tag_err;
+ uint32_t dif_app_tag_err;
+};
+
struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
uint32_t stat_max_pend_cmds;
uint32_t stat_max_qfull_cmds_alloc;
uint32_t stat_max_qfull_cmds_dropped;
+
+ struct qla_dif_statistics qla_dif_stats;
};
struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
unsigned long long transfer_bytes;
};
+struct qla_tc_param {
+ struct scsi_qla_host *vha;
+ uint32_t blk_sz;
+ uint32_t bufflen;
+ struct scatterlist *sg;
+ struct scatterlist *prot_sg;
+ struct crc_context *ctx;
+ uint8_t *ctx_dsd_alloced;
+};
+
/* Multi queue support */
#define MBC_INITIALIZE_MULTIQ 0x1f
#define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
uint8_t tgt_node_name[WWN_SIZE];
struct dentry *dfs_tgt_sess;
+ struct dentry *dfs_tgt_port_database;
+
struct list_head q_full_list;
uint32_t num_pend_cmds;
uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
spinlock_t sess_lock;
int rspq_vector_cpuid;
spinlock_t atio_lock ____cacheline_aligned;
+ struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+#define QLA_EARLY_LINKUP(_ha) \
+ ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+ _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
uint32_t fawwpn_enabled:1;
uint32_t exlogins_enabled:1;
uint32_t exchoffld_enabled:1;
- /* 35 bits */
+
+ uint32_t lip_ae:1;
+ uint32_t n2n_ae:1;
+ uint32_t fw_started:1;
+ uint32_t fw_init_done:1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
#define P2P_LOOP 3
uint8_t interrupts_on;
uint32_t isp_abort_cnt;
-
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
struct list_head vp_fcports; /* list of fcports */
struct list_head work_list;
spinlock_t work_lock;
+ struct work_struct iocb_work;
/* Commonly used flags and state information. */
struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
/* Count of active session/fcport */
int fcport_count;
wait_queue_head_t fcport_waitQ;
+ wait_queue_head_t vref_waitq;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
mb(); \
if (__vha->flags.delete_progress) { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
__bail = 1; \
} else { \
__bail = 0; \
} \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
+} while (0) \
#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
atomic_inc(&__qpair->ref_count); \
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index b48cce6..989e17b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct fc_port *sess = NULL;
- struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- seq_printf(s, "%s\n",vha->host_str);
+ seq_printf(s, "%s\n", vha->host_str);
if (tgt) {
- seq_printf(s, "Port ID Port Name Handle\n");
+ seq_puts(s, "Port ID Port Name Handle\n");
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}
-
static const struct file_operations dfs_tgt_sess_ops = {
.open = qla2x00_dfs_tgt_sess_open,
.read = seq_read,
@@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
};
static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ struct gid_list_info *gid_list;
+ dma_addr_t gid_list_dma;
+ fc_port_t fc_port;
+ char *id_iter;
+ int rc, i;
+ uint16_t entries, loop_id;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+ seq_printf(s, "%s\n", vha->host_str);
+ if (tgt) {
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x705c,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
+
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
+
+ id_iter = (char *)gid_list;
+
+ seq_puts(s, "Port Name Port ID Loop ID\n");
+
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
+
+ fc_port.loop_id = loop_id;
+
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
+ }
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ }
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+
+ return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+ .open = qla2x00_dfs_tgt_port_database_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
seq_printf(s, "num Q full sent = %lld\n",
vha->tgt_counters.num_q_full_sent);
+ /* DIF stats */
+ seq_printf(s, "DIF Inp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_bytes);
+ seq_printf(s, "DIF Outp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_bytes);
+ seq_printf(s, "DIF Inp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_requests);
+ seq_printf(s, "DIF Outp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_requests);
+ seq_printf(s, "DIF Guard err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_guard_err);
+ seq_printf(s, "DIF Ref tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+ seq_printf(s, "DIF App tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_app_tag_err);
return 0;
}
@@ -281,6 +367,14 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
goto out;
}
+ ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+ if (!ha->tgt.dfs_tgt_port_database) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to create debugFS tgt_port_database node.\n");
+ goto out;
+ }
+
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->tgt.dfs_tgt_sess = NULL;
}
+ if (ha->tgt.dfs_tgt_port_database) {
+ debugfs_remove(ha->tgt.dfs_tgt_port_database);
+ ha->tgt.dfs_tgt_port_database = NULL;
+ }
+
if (ha->dfs_fw_resource_cnt) {
debugfs_remove(ha->dfs_fw_resource_cnt);
ha->dfs_fw_resource_cnt = NULL;
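
The new tgt_port_database node above follows the standard debugfs + seq_file recipe: single_open() binds a show() callback, and seq_read/seq_lseek/single_release do the rest. For reference, a minimal kernel-module sketch of that recipe (a sketch only; the names and the fake table entry are made up, and error handling is elided):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

/* The show() callback emits the whole table in one go; seq_file takes
 * care of buffering and partial reads. */
static int demo_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "Port Name         Port ID  Loop ID\n");
	seq_printf(s, "%016llx  %06x   %d\n",
		   0x2100001b32aabbccULL, 0x010203, 5);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* i_private carries the 'data' pointer given to debugfs_create_file() */
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("qla_dfs_demo", NULL);
	debugfs_create_file("tgt_port_database", 0400, demo_dir, NULL,
			    &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
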
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b3d6441..5b24517 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
/*
* Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
- dma_addr_t, uint);
+ dma_addr_t, uint16_t);
extern int qla24xx_abort_command(srb_t *);
extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+ uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+ struct port_database_24xx *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 32fb900..f9d2fe7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- uint64_t zero = 0;
struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
- /* Check for logged in state. */
- if (pd->current_login_state != PDS_PRLI_COMPLETE &&
- pd->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd->current_login_state,
- pd->last_login_state, fcport->loop_id);
- rval = QLA_FUNCTION_FAILED;
- goto gpd_error_out;
- }
-
- if (fcport->loop_id == FC_NO_LOOP_ID ||
- (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
- memcmp(fcport->port_name, pd->port_name, 8))) {
- /* We lost the device mid way. */
- rval = QLA_NOT_LOGGED_IN;
- goto gpd_error_out;
- }
-
- /* Names are little-endian. */
- memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
- /* Get port_id of device. */
- fcport->d_id.b.domain = pd->port_id[0];
- fcport->d_id.b.area = pd->port_id[1];
- fcport->d_id.b.al_pa = pd->port_id[2];
- fcport->d_id.b.rsvd_1 = 0;
-
- /* If not target must be initiator or unknown type. */
- if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
-
- /* Passback COS information. */
- fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
- FC_COS_CLASS2 : FC_COS_CLASS3;
-
- if (pd->prli_svc_param_word_3[0] & BIT_7) {
- fcport->flags |= FCF_CONF_COMP_SUPPORTED;
- fcport->conf_compl_supported = 1;
- }
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
gpd_error_out:
memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->login_retry--;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return 0;
+ }
+
/* for pure Target Mode. Login will not be initiated */
if (vha->host->active_mode == MODE_TARGET)
return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
fcport->flags);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return;
+ }
+
if (fcport->flags & FCF_ASYNC_SENT) {
fcport->login_retry++;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
complete(&abt->u.abt.comp);
}
-static int
+int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
+ ha->flags.fw_started = 1;
}
return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
uint8_t domain;
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ port_id_t id;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
/* Save Host port and loop ID. */
/* byte order - Big Endian */
- vha->d_id.b.domain = domain;
- vha->d_id.b.area = area;
- vha->d_id.b.al_pa = al_pa;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ id.b.domain = domain;
+ id.b.area = area;
+ id.b.al_pa = al_pa;
+ id.b.rsvd_1 = 0;
+ qlt_update_host_map(vha, id);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_READY);
ql_dbg(ql_dbg_disc, vha, 0x2069,
"LOOP READY.\n");
+ ha->flags.fw_init_done = 1;
/*
* Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
}
}
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
ha->chip_reset++;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
return;
if (!ha->fw_major_version)
return;
+ if (!ha->flags.fw_started)
+ return;
ret = qla2x00_stop_firmware(vha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
"Attempting retry of stop-firmware command.\n");
ret = qla2x00_stop_firmware(vha);
}
+
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5350792..ea027f6 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct scatterlist *sg_prot;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
-
uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
@@ -1005,7 +1004,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3c66ea2..3203367 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -708,6 +708,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
ha->isp_ops->fw_dump(vha, 1);
+ ha->flags.fw_init_done = 0;
+ ha->flags.fw_started = 0;
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
+ ha->flags.lip_ae = 1;
+ ha->flags.n2n_ae = 0;
+
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -797,6 +802,10 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
/* case MBA_DCBX_COMPLETE: */
case MBA_POINT_TO_POINT: /* Point-to-Point */
+ ha->flags.lip_ae = 0;
+ ha->flags.n2n_ae = 1;
+
if (IS_QLA2100(ha))
break;
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, fcport->vha, 0x5034,
- "Async-%s error entry - hdl=%x"
+ "Async-%s error entry - %8phC hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, sp->handle, fcport->d_id.b.domain,
+ type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
logio->entry_status);
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, fcport->port_name, sp->handle,
+ fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
+ case LSC_SCODE_CMD_FAILED:
+ if (iop[1] == 0x0606) {
+ /*
+ * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
+ * Target side acked.
+ */
+ data[0] = MBS_COMMAND_COMPLETE;
+ goto logio_done;
+ }
+ data[0] = MBS_COMMAND_ERROR;
+ break;
case LSC_SCODE_NOXCB:
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, fcport->port_name,
+ sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
sp->done(sp, 0);
}
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 35079f4..a113ab3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,28 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+static struct mb_cmd_name {
+ uint16_t cmd;
+ const char *str;
+} mb_str[] = {
+ {MBC_GET_PORT_DATABASE, "GPDB"},
+ {MBC_GET_ID_LIST, "GIDList"},
+ {MBC_GET_LINK_PRIV_STATS, "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+ int i;
+ struct mb_cmd_name *e;
+
+ for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+ e = mb_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
+
static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
- dma_addr_t stats_dma, uint options)
+ dma_addr_t stats_dma, uint16_t options)
{
int rval;
mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
- mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
- mcp->mb[2] = MSW(stats_dma);
- mcp->mb[3] = LSW(stats_dma);
- mcp->mb[6] = MSW(MSD(stats_dma));
- mcp->mb[7] = LSW(MSD(stats_dma));
- mcp->mb[8] = sizeof(struct link_statistics) / 4;
- mcp->mb[9] = vha->vp_idx;
- mcp->mb[10] = options;
- mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
- mcp->in_mb = MBX_2|MBX_1|MBX_0;
- mcp->tov = MBX_TOV_SECONDS;
- mcp->flags = IOCTL_CMD;
- rval = qla2x00_mailbox_command(vha, mcp);
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+ mc.mb[2] = MSW(stats_dma);
+ mc.mb[3] = LSW(stats_dma);
+ mc.mb[6] = MSW(MSD(stats_dma));
+ mc.mb[7] = LSW(MSD(stats_dma));
+ mc.mb[8] = sizeof(struct link_statistics) / 4;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16(options);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp = NULL;
unsigned long flags;
int found;
+ port_id_t id;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (rptid_entry->entry_status != 0)
return;
+ id.b.domain = rptid_entry->port_id[2];
+ id.b.area = rptid_entry->port_id[1];
+ id.b.al_pa = rptid_entry->port_id[0];
+ id.b.rsvd_1 = 0;
+
if (rptid_entry->format == 0) {
/* loop */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+ ql_dbg(ql_dbg_async, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n", rptid_entry->vp_setup,
rptid_entry->vp_acquired);
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+ ql_dbg(ql_dbg_async, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
/* fabric */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+ ql_dbg(ql_dbg_async, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", rptid_entry->vp_idx,
rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
WWN_SIZE);
}
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
}
fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (!found)
return;
- vp->d_id.b.domain = rptid_entry->port_id[2];
- vp->d_id.b.area = rptid_entry->port_id[1];
- vp->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vp, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vp, id);
/*
* Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ sp->u.iocb_cmd.u.mbx.rc = res;
+
+ complete(&sp->u.iocb_cmd.u.mbx.comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This mailbox routine uses the IOCB interface to send a mailbox command.
+ * This allows non-critical (non chip-setup) commands to go
+ * out in parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ struct srb_iocb *c;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+ c = &sp->u.iocb_cmd;
+ c->timeout = qla2x00_async_iocb_timeout;
+ init_completion(&c->u.mbx.comp);
+
+ sp->done = qla2x00_async_mb_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&c->u.mbx.comp);
+ memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+ rval = c->u.mbx.rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ sp->free(sp);
+ break;
+ default:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ sp->free(sp);
+ break;
+ }
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ dma_addr_t pd_dma;
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate port database structure.\n");
+ goto done_free_sp;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_PORT_DATABASE;
+ mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[2] = MSW(pd_dma);
+ mc.mb[3] = LSW(pd_dma);
+ mc.mb[6] = MSW(MSD(pd_dma));
+ mc.mb[7] = LSW(MSD(pd_dma));
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %8phC fail\n", __func__, fcport->port_name);
+ goto done_free_sp;
+ }
+
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+ __func__, fcport->port_name);
+
+done_free_sp:
+ if (pd)
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+ return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rval = QLA_SUCCESS;
+ uint64_t zero = 0;
+
+ /* Check for logged in state. */
+ if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+ pd->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd->current_login_state,
+ pd->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+ memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[1];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target, must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd->prli_svc_param_word_3[0] & BIT_7) {
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ fcport->conf_compl_supported = 1;
+ }
+
+gpd_error_out:
+ return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: don't call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+ void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_ID_LIST;
+ mc.mb[2] = MSW(id_list_dma);
+ mc.mb[3] = LSW(id_list_dma);
+ mc.mb[6] = MSW(MSD(id_list_dma));
+ mc.mb[7] = LSW(MSD(id_list_dma));
+ mc.mb[8] = 0;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
+ *entries = mc.mb[1];
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
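The qla_mbx.c hunks above convert GPDB, GIDList, and Stats from the polled qla2x00_mailbox_command() path to an SRB that carries the mailbox registers and signals a completion from its ->done callback. A minimal sketch of that submit-and-wait pattern, using hypothetical my_* names rather than the driver's own API:

	#include <linux/types.h>
	#include <linux/completion.h>

	struct my_mb_req {
		u16 mb[32];			/* mailbox registers, in and out */
		int rc;				/* result posted by the done callback */
		struct completion comp;		/* signalled when the IOCB completes */
	};

	/* Interrupt-context callback: post the result and wake the waiter. */
	static void my_mb_done(struct my_mb_req *req, int res)
	{
		req->rc = res;
		complete(&req->comp);
	}

	/* Process context: queue the request, then sleep until my_mb_done() runs. */
	static int my_mb_submit_and_wait(struct my_mb_req *req,
					 int (*queue)(struct my_mb_req *))
	{
		int rval;

		init_completion(&req->comp);
		rval = queue(req);		/* analogous to qla2x00_start_sp() */
		if (rval)
			return rval;
		wait_for_completion(&req->comp);
		return req->rc;
	}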
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c6d6f0d..09a490c 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
+ wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
+ 10*HZ);
+
spin_lock_irqsave(&ha->vport_slock, flags);
- while (atomic_read(&vha->vref_count)) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- msleep(500);
-
- spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count)) {
+ ql_dbg(ql_dbg_vport, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ vha->vref_count = (atomic_t)ATOMIC_INIT(0);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
i++;
}
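The qla_mid.c hunk replaces the msleep()-based polling of vref_count with a waitqueue: every path that drops the reference wakes vref_waitq, and teardown blocks in wait_event_timeout() for up to ten seconds. The general refcount-drain shape, with hypothetical names and the wait condition written as "count reached zero":

	#include <linux/atomic.h>
	#include <linux/wait.h>
	#include <linux/jiffies.h>

	static atomic_t my_ref_count = ATOMIC_INIT(0);
	static DECLARE_WAIT_QUEUE_HEAD(my_ref_waitq);

	/* Reader side: drop the reference and wake anyone draining the count. */
	static void my_put_ref(void)
	{
		if (atomic_dec_and_test(&my_ref_count))
			wake_up(&my_ref_waitq);
	}

	/* Teardown side: sleep until the count drains, or give up after 10s. */
	static void my_drain_refs(void)
	{
		wait_event_timeout(my_ref_waitq,
				   atomic_read(&my_ref_count) == 0, 10 * HZ);
	}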
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fed235..41d5b09 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+ struct scsi_qla_host *vha = container_of(work,
+ struct scsi_qla_host, iocb_work);
+ int cnt = 0;
+
+ while (!list_empty(&vha->work_list)) {
+ qla2x00_do_work(vha);
+ cnt++;
+ if (cnt > 10)
+ break;
+ }
+}
+
/*
* PCI driver interface
*/
@@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
qla2xxx_wake_dpc(base_vha);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
+ qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
+ init_waitqueue_head(&vha->vref_waitq);
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
spin_unlock_irqrestore(&vha->work_lock, flags);
- qla2xxx_wake_dpc(vha);
+
+ if (QLA_EARLY_LINKUP(vha->hw))
+ schedule_work(&vha->iocb_work);
+ else
+ qla2xxx_wake_dpc(vha);
return QLA_SUCCESS;
}
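qla_os.c adds a per-host work item that drains vha->work_list in at most ten passes per invocation, and qla2x00_post_work() now schedules it during early link-up instead of waking the DPC thread. A bounded drain loop of that shape, with hypothetical names:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/list.h>

	struct my_host {
		struct list_head work_list;
		struct work_struct iocb_work;
	};

	static void my_do_work(struct my_host *h)
	{
		/* process one batch of queued events */
	}

	/*
	 * Drain pending events, but bound the loop so the worker cannot
	 * monopolise the CPU if new events keep arriving.
	 */
	static void my_iocb_work_fn(struct work_struct *work)
	{
		struct my_host *h = container_of(work, struct my_host, iocb_work);
		int cnt = 0;

		while (!list_empty(&h->work_list)) {
			my_do_work(h);
			if (++cnt > 10)
				break;
		}
	}

	/* At probe time: INIT_WORK(&h->iocb_work, my_iocb_work_fn);
	 * to kick it:    schedule_work(&h->iocb_work); */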
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 45f5077..0e03ca2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+ struct abts_recv_from_24xx *);
+
/*
* Global Variables
*/
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+static const char *prot_op_str(u32 prot_op)
+{
+ switch (prot_op) {
+ case TARGET_PROT_NORMAL: return "NORMAL";
+ case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
+ case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
+ case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
+ case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
+ case TARGET_PROT_DIN_PASS: return "DIN_PASS";
+ case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
+ default: return "UNKNOWN";
+ }
+}
+
/* This API intentionally takes dest as a parameter, rather than returning
* int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
uint8_t *d_id)
{
- struct qla_hw_data *ha = vha->hw;
- uint8_t vp_idx;
+ struct scsi_qla_host *host;
+ uint32_t key = 0;
- if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
- return NULL;
-
- if (vha->d_id.b.al_pa == d_id[2])
+ if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+ (vha->d_id.b.al_pa == d_id[2]))
return vha;
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
+ key = (uint32_t)d_id[0] << 16;
+ key |= (uint32_t)d_id[1] << 8;
+ key |= (uint32_t)d_id[2];
- return NULL;
+ host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!host)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Unable to find host %06x\n", key);
+
+ return host;
}
static inline
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
(struct abts_recv_from_24xx *)atio;
struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
entry->vp_index);
+ unsigned long flags;
+
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xffff,
"qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, (response_t *)atio);
+ if (!ha_locked)
+ spin_lock_irqsave(&host->hw->hardware_lock, flags);
+ qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
break;
-
}
/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_gen++;
sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
sp->fcport->logout_on_delete = 1;
+ sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
break;
case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->deleted = 0;
c = "PRLI";
break;
case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
}
/* Get list of logged in devices */
- rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
"qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
request_t *pkt;
struct nack_to_isp *nack;
+ if (!ha->flags.fw_started)
+ return;
+
ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
/* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
}
EXPORT_SYMBOL(qlt_free_mcmd);
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+ struct atio_from_isp *atio = &cmd->atio;
+ struct ctio7_to_24xx *ctio;
+ uint16_t temp;
+
+ ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+ "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+ "sense_key=%02x, asc=%02x, ascq=%02x",
+ vha, atio, scsi_status, sense_key, asc, ascq);
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!ctio) {
+ ql_dbg(ql_dbg_async, vha, 0x3067,
+ "qla2x00t(%ld): %s failed: unable to allocate request packet",
+ vha->host_no, __func__);
+ goto out;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE;
+ ctio->nport_handle = cmd->sess->loop_id;
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.ox_id = cpu_to_le16(temp);
+ ctio->u.status1.scsi_status =
+ cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+ ctio->u.status1.response_len = cpu_to_le16(18);
+ ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+ if (ctio->u.status1.residual != 0)
+ ctio->u.status1.scsi_status |=
+ cpu_to_le16(SS_RESIDUAL_UNDER);
+
+ /* Response code and sense key */
+ put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+ (&ctio->u.status1.sense_data)[0]);
+ /* Additional sense length */
+ put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+ /* ASC and ASCQ */
+ put_unaligned_le32(((asc << 24) | (ascq << 16)),
+ (&ctio->u.status1.sense_data)[3]);
+
+ /* Memory Barrier */
+ wmb();
+
+ qla2x00_start_iocbs(vha, vha->req);
+out:
+ return;
+}
+
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h-1] = prm->cmd;
+ ha->tgt.cmds[h - 1] = prm->cmd;
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
return cmd->bufflen > 0;
}
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd;
+ struct scsi_qla_host *vha;
+
+ /* asc 0x10=dif error */
+ if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+ cmd = prm->cmd;
+ vha = cmd->vha;
+ /* ASCQ */
+ switch (prm->sense_buffer[13]) {
+ case 1:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ }
+ ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+ }
+}
+
/*
* Called without ha->hardware_lock held
*/
@@ -2512,18 +2649,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
for (i = 0; i < prm->sense_buffer_len/4; i++)
((uint32_t *)ctio->u.status1.sense_data)[i] =
cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
- if (unlikely((prm->sense_buffer_len % 4) != 0)) {
- static int q;
- if (q < 10) {
- ql_dbg(ql_dbg_tgt, vha, 0xe04f,
- "qla_target(%d): %d bytes of sense "
- "lost", prm->tgt->ha->vp_idx,
- prm->sense_buffer_len % 4);
- q++;
- }
- }
-#endif
+
+ qlt_print_dif_err(prm);
+
} else {
ctio->u.status1.flags &=
~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
/* Sense with len > 24, is it possible ??? */
}
-
-
-/* diff */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
- /*
- * Uncomment when corresponding SCSI changes are done.
- *
- if (!sp->cmd->prot_chk)
- return 0;
- *
- */
switch (se_cmd->prot_op) {
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
return 0;
}
-/*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
- */
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
{
- uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_STRIP:
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ return 1;
+ default:
+ return 0;
+ }
+ return 0;
+}
- /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+/*
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
+ */
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+ uint16_t *pfw_prot_opts)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+ scsi_qla_host_t *vha = cmd->tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t t32 = 0;
+
+ /*
+ * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
 * have been implemented by TCM, before AppTag is avail.
* Look for modesense_handlers[]
*/
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
ctx->app_tag_mask[0] = 0x0;
ctx->app_tag_mask[1] = 0x0;
+ if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+ (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+ *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+ else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+ *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+ }
+
+ t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
switch (se_cmd->prot_type) {
case TARGET_DIF_TYPE0_PROT:
/*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
+ * No check for ql2xenablehba_err_chk, as it
+ * would be an I/O error if hba tag generation
+ * is not done.
*/
ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
/* enable ALL bytes of the ref tag */
ctx->ref_tag_mask[0] = 0xff;
ctx->ref_tag_mask[1] = 0xff;
ctx->ref_tag_mask[2] = 0xff;
ctx->ref_tag_mask[3] = 0xff;
break;
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
case TARGET_DIF_TYPE1_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
+ /*
+ * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+ * REF tag, and 16 bit app tag.
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE2_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+ * tag has to match LBA in CDB + N
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE3_PROT:
- ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
- ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
- break;
+ /* For TYPE 3 protection: 16 bit GUARD only */
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+ ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+ break;
}
}
-
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t h;
struct atio_from_isp *atio = &prm->cmd->atio;
+ struct qla_tc_param tc;
uint16_t t16;
ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
transfer_length = data_bytes;
- data_bytes += dif_bytes;
+ if (cmd->prot_sg_cnt)
+ data_bytes += dif_bytes;
break;
-
case TARGET_PROT_DIN_STRIP:
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_PASS:
case TARGET_PROT_DOUT_PASS:
transfer_length = data_bytes + dif_bytes;
break;
-
default:
BUG();
break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
break;
}
-
/* ---- PKT ---- */
/* Update entry type to indicate Command Type CRC_2 IOCB */
pkt->entry_type = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
} else
ha->tgt.cmds[h-1] = prm->cmd;
-
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
- pkt->nport_handle = prm->cmd->loop_id;
+ pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
-
pkt->dseg_count = prm->tot_dsds;
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
-
/* ----- CRC context -------- */
/* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Set handle */
crc_ctx_pkt->handle = pkt->handle;
- qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+ qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
-
if (!bundling) {
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
} else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
+ memset((uint8_t *)&tc, 0 , sizeof(tc));
+ tc.vha = vha;
+ tc.blk_sz = cmd->blk_sz;
+ tc.bufflen = cmd->bufflen;
+ tc.sg = cmd->sg;
+ tc.prot_sg = cmd->prot_sg;
+ tc.ctx = crc_ctx_pkt;
+ tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
/* Walks data segments */
pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
if (!bundling && prm->prot_seg_cnt) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
- prm->tot_dsds, cmd))
+ prm->tot_dsds, &tc))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
- (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+ (prm->tot_dsds - prm->prot_seg_cnt), &tc))
goto crc_queuing_error;
if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
- prm->prot_seg_cnt, cmd))
+ prm->prot_seg_cnt, &tc))
goto crc_queuing_error;
}
return QLA_SUCCESS;
crc_queuing_error:
/* Cleanup will be performed by the caller */
+ vha->hw->tgt.cmds[h - 1] = NULL;
return QLA_FUNCTION_FAILED;
}
-
/*
* Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
* QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
else
vha->tgt_counters.core_qla_que_buf++;
- if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+ if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+ if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
/*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
+ * it is assumed either hardware_lock or qpair lock is held.
*/
-static inline int
+static void
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
- struct ctio_crc_from_fw *sts)
+ struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
uint8_t *ep = &sts->expected_dif[0];
- uint32_t e_ref_tag, a_ref_tag;
- uint16_t e_app_tag, a_app_tag;
- uint16_t e_guard, a_guard;
uint64_t lba = cmd->se_cmd.t_task_lba;
+ uint8_t scsi_status, sense_key, asc, ascq;
+ unsigned long flags;
- a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
- a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
- a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+ cmd->trc_flags |= TRC_DIF_ERR;
- e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
- e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
- e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+ cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+ cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+ cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
- ql_dbg(ql_dbg_tgt, vha, 0xe075,
- "iocb(s) %p Returned STATUS.\n", sts);
+ cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+ cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+ cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
- ql_dbg(ql_dbg_tgt, vha, 0xf075,
- "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+ "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
- /*
- * Ignore sector if:
- * For type 3: ref & app tag is all 'f's
- * For type 0,1,2: app tag is all 'f's
- */
- if ((a_app_tag == 0xffff) &&
- ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
- (a_ref_tag == 0xffffffff))) {
- uint32_t blocks_done;
+ scsi_status = sense_key = asc = ascq = 0;
- /* 2TB boundary case covered automatically with this */
- blocks_done = e_ref_tag - (uint32_t)lba + 1;
- cmd->se_cmd.bad_sector = e_ref_tag;
- cmd->se_cmd.pi_err = 0;
- ql_dbg(ql_dbg_tgt, vha, 0xf074,
- "need to return scsi good\n");
+ /* check appl tag */
+ if (cmd->e_app_tag != cmd->a_app_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
- /* Update protection tag */
- if (cmd->prot_sg_cnt) {
- uint32_t i, k = 0, num_ent;
- struct scatterlist *sg, *sgl;
-
-
- sgl = cmd->prot_sg;
-
- /* Patch the corresponding protection tags */
- for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
- num_ent = sg_dma_len(sg) / 8;
- if (k + num_ent < blocks_done) {
- k += num_ent;
- continue;
- }
- k = blocks_done;
- break;
- }
-
- if (k != blocks_done) {
- ql_log(ql_log_warn, vha, 0xf076,
- "unexpected tag values tag:lba=%u:%llu)\n",
- e_ref_tag, (unsigned long long)lba);
- goto out;
- }
-
-#if 0
- struct sd_dif_tuple *spt;
- /* TODO:
- * This section came from initiator. Is it valid here?
- * should ulp be override with actual val???
- */
- spt = page_address(sg_page(sg)) + sg->offset;
- spt += j;
-
- spt->app_tag = 0xffff;
- if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
- spt->ref_tag = 0xffffffff;
-#endif
- }
-
- return 0;
- }
-
- /* check guard */
- if (e_guard != a_guard) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe076,
- "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ cmd->dif_err_code = DIF_ERR_APP;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x2;
}
/* check ref tag */
- if (e_ref_tag != a_ref_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = e_ref_tag;
+ if (cmd->e_ref_tag != cmd->a_ref_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
- ql_log(ql_log_warn, vha, 0xe077,
- "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
+ cmd->dif_err_code = DIF_ERR_REF;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x3;
goto out;
}
- /* check appl tag */
- if (e_app_tag != a_app_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe078,
- "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check guard */
+ if (cmd->e_guard != cmd->a_guard) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ cmd->dif_err_code = DIF_ERR_GRD;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x1;
}
out:
- return 1;
-}
+ switch (cmd->state) {
+ case QLA_TGT_STATE_NEED_DATA:
+ /* handle_data will load DIF error code */
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ break;
+ default:
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+ /* assume scsi status gets out on the wire.
+ * Will not wait for completion.
+ */
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+}
/* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
"Sending TERM ELS CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe080,
"qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
{
int term = 0;
+ if (cmd->se_cmd.prot_op)
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x] op %#x/%s",
+ cmd->lba, cmd->lba,
+ cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr,
+ cmd->se_cmd.prot_op,
+ prot_op_str(cmd->se_cmd.prot_op));
+
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+ "qla_target(%d): CTIO with DIF_ERROR status %x "
+ "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+ "expect_dif[0x%llx]\n",
vha->vp_idx, status, cmd->state, se_cmd,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- if (qlt_handle_dif_error(vha, cmd, ctio)) {
- if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- /* scsi Write/xfer rdy complete */
- goto skip_term;
- } else {
- /* scsi read/xmit respond complete
- * call handle dif to send scsi status
- * rather than terminate exchange.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ha->tgt.tgt_ops->handle_dif_err(cmd);
- return;
- }
- } else {
- /* Need to generate a SCSI good completion.
- * because FW did not send scsi status.
- */
- status = 0;
- goto skip_term;
- }
- break;
+ qlt_handle_dif_error(vha, cmd, ctio);
+ return;
}
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
}
}
-skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (sess != NULL) {
- if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+ if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+ sess->fw_login_state != DSC_LS_PLOGI_COMP) {
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Make session global (not used in fabric mode) */
if (ha->current_topology != ISP_CFG_F) {
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
+ qla24xx_post_nack_work(vha, sess, iocb,
+ SRB_NACK_PRLI);
+ res = 0;
+ } else {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
} else {
if (sess) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post nack\n",
- __func__, __LINE__, sess->port_name);
-
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
SRB_NACK_PRLI);
res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
break;
-
case ELS_TPRLO:
if (le16_to_cpu(iocb->u.isp24.flags) &
NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, bool ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
+ unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
return 0;
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
qlt_send_busy(vha, atio, status);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return 1;
}
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
unsigned long flags;
if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_io, vha, 0x3064,
+ ql_dbg(ql_dbg_tgt, vha, 0x3064,
"ATIO pkt, but no tgt (ha %p)", ha);
return;
}
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
if (rc != 0) {
tgt->irq_cmd_count--;
return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
- rc = qla2x00_get_port_database(vha, fcport, 0);
+ rc = qla24xx_gpdb_wait(vha, fcport, 0);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
"qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term;
-
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
if (rc != 0)
goto out_term;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
out_term2:
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
-out_term:
- qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
+out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop)
- goto out_term;
+ goto out_term2;
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (!sess)
- goto out_term;
+ goto out_term2;
} else {
if (sess->deleted) {
sess = NULL;
- goto out_term;
+ goto out_term2;
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
sess = NULL;
- goto out_term;
+ goto out_term2;
}
}
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
if (rc != 0)
goto out_term;
-
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
+out_term2:
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
- if (base_vha->fc_vport)
- return 0;
-
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
+ if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+ ha->tgt.tgt_ops->add_target(base_vha);
+
return 0;
}
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+ struct scsi_qla_host *node;
+ u32 key = 0;
+
+ btree_for_each_safe32(&ha->tgt.host_map, key, node)
+ btree_remove32(&ha->tgt.host_map, key);
+
+ btree_destroy32(&ha->tgt.host_map);
+}
+
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
unsigned char *b)
{
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
struct atio_from_isp *pkt;
int cnt, i;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_unknown_atio_work_fn);
qlt_clear_mode(base_vha);
+
+ rc = btree_init32(&ha->tgt.host_map);
+ if (rc)
+ ql_log(ql_log_info, base_vha, 0xffff,
+ "Unable to initialize ha->host_map btree\n");
+
+ qlt_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(op);
}
void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
+ void *slot;
+ u32 key;
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
+ key = vha->d_id.b24;
+
switch (cmd) {
case SET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!slot) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Save vha in host_map %p %06x\n", vha, key);
+ rc = btree_insert32(&vha->hw->tgt.host_map,
+ key, vha, GFP_ATOMIC);
+ if (rc)
+ ql_log(ql_log_info, vha, 0xffff,
+ "Unable to insert s_id into host_map: %06x\n",
+ key);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "replace existing vha in host_map %p %06x\n", vha, key);
+ btree_update32(&vha->hw->tgt.host_map, key, vha);
break;
case RESET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "clear vha in host_map %p %06x\n", vha, key);
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (slot)
+ btree_remove32(&vha->hw->tgt.host_map, key);
+ vha->d_id.b24 = 0;
break;
}
}
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->d_id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ } else if (vha->d_id.b24 != id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, RESET_AL_PA);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ }
+}
+
static int __init qlt_parse_ini_mode(void)
{
if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
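qla_target.c drops the tgt_vp_map[al_pa] indexing in qlt_find_host_by_d_id() in favour of a 32-bit btree keyed by the full 24-bit port id (domain << 16 | area << 8 | al_pa), so NPIV hosts that happen to share an al_pa no longer collide. A stripped-down sketch of that keying scheme, built on the same lib/btree helpers the patch uses (the my_* wrappers are hypothetical):

	#include <linux/btree.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	static struct btree_head32 my_host_map;

	static inline u32 my_port_id_key(u8 domain, u8 area, u8 al_pa)
	{
		return ((u32)domain << 16) | ((u32)area << 8) | al_pa;
	}

	static int my_host_map_init(void)
	{
		return btree_init32(&my_host_map);
	}

	/* Register a host under its 24-bit port id; GFP_ATOMIC because callers
	 * may hold spinlocks. */
	static int my_host_map_add(u8 domain, u8 area, u8 al_pa, void *host)
	{
		return btree_insert32(&my_host_map,
				      my_port_id_key(domain, area, al_pa),
				      host, GFP_ATOMIC);
	}

	static void *my_host_map_find(u8 domain, u8 area, u8 al_pa)
	{
		return btree_lookup32(&my_host_map,
				      my_port_id_key(domain, area, al_pa));
	}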
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index a7f90dc..d644202 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
}
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+ int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+ return (be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
- void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
void (*clear_nacl_from_fcport_map)(struct fc_port *);
void (*put_sess)(struct fc_port *);
void (*shutdown_sess)(struct fc_port *);
+ int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+ int (*chk_dif_tags)(uint32_t tag);
+ void (*add_target)(struct scsi_qla_host *);
};
int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_ABORT_ALL 0xFFFE
#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
#define QLA_TGT_NEXUS_LOSS 0xFFFC
-#define QLA_TGT_ABTS 0xFFFB
-#define QLA_TGT_2G_ABORT_TASK 0xFFFA
+#define QLA_TGT_ABTS 0xFFFB
+#define QLA_TGT_2G_ABORT_TASK 0xFFFA
/* Notify Acknowledge flags */
#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
TRC_CMD_FREE = BIT_17,
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
+ TRC_DIF_ERR = BIT_20,
};
struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
unsigned int sg_mapped:1;
unsigned int free_sg:1;
unsigned int write_data_transferred:1;
- unsigned int ctx_dsd_alloced:1;
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
struct list_head cmd_list;
struct atio_from_isp atio;
- /* t10dif */
+
+ uint8_t ctx_dsd_alloced;
+
+ /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+ int8_t dif_err_code;
struct scatterlist *prot_sg;
uint32_t prot_sg_cnt;
- uint32_t blk_sz;
+ uint32_t blk_sz, num_blks;
+ uint8_t scsi_status, sense_key, asc, ascq;
+
struct crc_context *ctx;
+ uint8_t *cdb;
+ uint64_t lba;
+ uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
+ uint32_t a_ref_tag, e_ref_tag;
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+ uint8_t, uint8_t, uint8_t);
+
#endif /* __QLA_TARGET_H */
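qla_target.h now stores per-command copies of the actual and expected T10 PI tuple (guard, app tag, ref tag) so qlt_handle_dif_error() can report them and build sense data later. Each tuple arrives from the firmware as eight big-endian bytes; a small sketch of unpacking one (hypothetical names, using the kernel's unaligned helpers):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	struct my_pi_tuple {
		u16 guard;	/* CRC guard over the data block */
		u16 app_tag;	/* application tag */
		u32 ref_tag;	/* reference tag, typically low 32 bits of the LBA */
	};

	/* Layout on the wire: guard(2) | app tag(2) | ref tag(4), big-endian. */
	static void my_unpack_pi(const u8 *p, struct my_pi_tuple *t)
	{
		t->guard   = get_unaligned_be16(p + 0);
		t->app_tag = get_unaligned_be16(p + 2);
		t->ref_tag = get_unaligned_be32(p + 4);
	}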
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3cb1964..45bc84e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.38-k"
+#define QLA2XXX_VERSION "9.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 7
+#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MINOR_VER 0
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8e8ab0f..7443e4e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
+ switch (cmd->dif_err_code) {
+ case DIF_ERR_GRD:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case DIF_ERR_REF:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_APP:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_NONE:
+ default:
+ break;
+ }
+
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
{
- struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
- /* take an extra kref to prevent cmd free too early.
- * need to wait for SCSI status/check condition to
- * finish responding generate by transport_generic_request_failure.
- */
- kref_get(&cmd->se_cmd.cmd_kref);
- transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+ return 0;
}
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+ uint16_t *pfw_prot_opts)
{
- INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+ *pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+ return 0;
}
/*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
- .handle_dif_err = tcm_qla2xxx_handle_dif_err,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
.put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
+ .get_dif_tags = tcm_qla2xxx_dif_tags,
+ .chk_dif_tags = tcm_qla2xxx_chk_dif_tags,
};
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1359913..e8c26e6e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7642,7 +7642,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
if (kstrtoul(buf, 0, &value))
return -EINVAL;
- if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+ if (value >= UFS_PM_LVL_MAX)
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index f5e3300..fd7c16a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -43,7 +43,7 @@
#include "target_core_ua.h"
static sense_reason_t core_alua_check_transition(int state, int valid,
- int *primary);
+ int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
struct se_lun *lun, int explicit, int offline);
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state,
- valid_states, &primary);
+ rc = core_alua_check_transition(alua_access_state, valid_states,
+ &primary, 1);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
*primary = 0;
break;
case ALUA_ACCESS_STATE_TRANSITION:
- /*
- * Transitioning is set internally, and
- * cannot be selected manually.
- */
- goto not_supported;
+ if (!(valid & ALUA_T_SUP) || explicit)
+ /*
+ * Transitioning is set internally and by tcmu daemon,
+ * and cannot be selected through a STPG.
+ */
+ goto not_supported;
+ *primary = 0;
+ break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
- struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
return 0;
- if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
return -EAGAIN;
/*
* Flush any pending transitions
*/
- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
- ALUA_ACCESS_STATE_TRANSITION) {
- /* Just in case */
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
- tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
- wait_for_completion(&wait);
- tg_pt_gp->tg_pt_gp_transition_complete = NULL;
- return 0;
- }
+ if (!explicit)
+ flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
- tg_pt_gp->tg_pt_gp_alua_previous_state =
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
core_alua_queue_state_change_ua(tg_pt_gp);
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ return 0;
+
+ tg_pt_gp->tg_pt_gp_alua_previous_state =
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
/*
* Check for the optional ALUA primary state transition delay
*/
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
- unsigned long transition_tmo;
-
- transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
- &tg_pt_gp->tg_pt_gp_transition_work,
- transition_tmo);
- } else {
+ schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ if (explicit) {
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
- &tg_pt_gp->tg_pt_gp_transition_work, 0);
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
}
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
+ if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ return -ENODEV;
+
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
- if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+ if (core_alua_check_transition(new_state, valid_states, &primary,
+ explicit) != 0)
return -EINVAL;
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
- core_alua_do_transition_tg_pt_work);
+ INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+ core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev)
{
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ if (!(dev->transport->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
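The transition-validity change above hinges on two inputs: the supported-states mask advertised by the target port group and whether the request is explicit. The sketch below is a hypothetical userspace model of that decision (alua_check_transition, T_SUP and the state values are illustrative stand-ins, not the kernel's definitions): a TRANSITION request is accepted only when transitioning is advertised as supported and the request is implicit.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's ALUA state and support bits. */
enum { STATE_ACTIVE_OPTIMIZED = 0x0, STATE_STANDBY = 0x2, STATE_TRANSITION = 0xf };
#define T_SUP (1 << 7)   /* "transitioning supported" bit, illustrative only */

/* Return 0 if the requested state may be entered, -1 otherwise. */
static int alua_check_transition(int state, int valid, int explicit_req)
{
	if (state == STATE_TRANSITION) {
		/* TRANSITION may only be set implicitly, and only when advertised. */
		if (!(valid & T_SUP) || explicit_req)
			return -1;
		return 0;
	}
	return 0; /* other states would be checked against their own support bits */
}

int main(void)
{
	printf("implicit TRANSITION, T_SUP set:   %d\n",
	       alua_check_transition(STATE_TRANSITION, T_SUP, 0)); /* 0  */
	printf("explicit TRANSITION, T_SUP set:   %d\n",
	       alua_check_transition(STATE_TRANSITION, T_SUP, 1)); /* -1 */
	printf("implicit TRANSITION, T_SUP clear: %d\n",
	       alua_check_transition(STATE_TRANSITION, 0, 0));     /* -1 */
	return 0;
}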
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 54b36c9..38b5025 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
+ if (!tfo->check_stop_free) {
+ pr_err("Missing tfo->check_stop_free()\n");
+ return -EINVAL;
+ }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a8f8e53..94cda79 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
- dump_stack();
return 0;
}
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
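The pscsi hunk above switches to min_not_zero() so that a zero sector_size or max_sectors reported by the SCSI layer cannot become the backend limit. A minimal userspace sketch of that behaviour follows; the macro is a local re-implementation of the kernel helper's intent, not the kernel header itself.

#include <stdio.h>

/* Local re-implementation of the min_not_zero() idea:
 * pick the smaller of two values, but ignore a value that is zero. */
#define min_not_zero(a, b) ({		\
	typeof(a) _a = (a);		\
	typeof(b) _b = (b);		\
	_a == 0 ? _b : (_b == 0 ? _a : (_a < _b ? _a : _b)); })

int main(void)
{
	printf("%d\n", min_not_zero(0, 512));     /* 512: zero is ignored */
	printf("%d\n", min_not_zero(4096, 512));  /* 512: normal minimum  */
	printf("%d\n", min_not_zero(256, 0));     /* 256: zero is ignored */
	return 0;
}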
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 68d8aef..c194063 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
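The VERIFY/VERIFY_16 hunk decodes the LBA and sector count from two different CDB layouts. The userspace sketch below shows the byte positions being assumed (a 10-byte CDB carries a 32-bit big-endian LBA at bytes 2-5 and a 16-bit length at bytes 7-8; a 16-byte CDB carries a 64-bit LBA at bytes 2-9 and a 32-bit length at bytes 10-13); the helpers are local, not the kernel's transport_* functions.

#include <stdio.h>
#include <stdint.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint64_t get_be64(const uint8_t *p)
{
	return ((uint64_t)get_be32(p) << 32) | get_be32(p + 4);
}

int main(void)
{
	/* VERIFY(10): opcode 0x2f, LBA 0x01020304, 8 sectors */
	uint8_t v10[10] = { 0x2f, 0, 0x01, 0x02, 0x03, 0x04, 0, 0x00, 0x08, 0 };
	/* VERIFY(16): opcode 0x8f, LBA 0x0000000100000000, 16 sectors */
	uint8_t v16[16] = { 0x8f, 0, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
			    0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0, 0 };

	printf("VERIFY(10): lba=%u sectors=%u\n",
	       get_be32(&v10[2]), (unsigned)((v10[7] << 8) | v10[8]));
	printf("VERIFY(16): lba=%llu sectors=%u\n",
	       (unsigned long long)get_be64(&v16[2]), get_be32(&v16[10]));
	return 0;
}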
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0dbfa0..6fb1919 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
if (ret)
goto out_kill_ref;
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ if (!(dev->transport->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 434d9d6..b1a3cdb 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
* Fabric modules are expected to return '1' here if the se_cmd being
* passed is released at this point, or zero if not being released.
*/
- return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
- : 0;
+ return cmd->se_tfo->check_stop_free(cmd);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c3adefe..c6874c3 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
+#include <linux/configfs.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
spinlock_t commands_lock;
struct timer_list timeout;
+ unsigned int cmd_time_out;
char dev_config[TCMU_CONFIG_LEN];
};
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+ if (udev->cmd_time_out)
+ tcmu_cmd->deadline = jiffies +
+ msecs_to_jiffies(udev->cmd_time_out);
idr_preload(GFP_KERNEL);
spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_debug("sleeping for ring space\n");
spin_unlock_irq(&udev->cmdr_lock);
- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+ if (udev->cmd_time_out)
+ ret = schedule_timeout(
+ msecs_to_jiffies(udev->cmd_time_out));
+ else
+ ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
- mod_timer(&udev->timeout,
- round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+ if (udev->cmd_time_out)
+ mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+ msecs_to_jiffies(udev->cmd_time_out)));
return TCM_NO_SENSE;
}
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
}
udev->hba = hba;
+ udev->cmd_time_out = TCMU_TIME_OUT;
init_waitqueue_head(&udev->wait_cmdr);
spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
if (dev->dev_attrib.hw_block_size == 0)
dev->dev_attrib.hw_block_size = 512;
/* Other attributes can be configured in userspace */
- dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.hw_max_sectors)
+ dev->dev_attrib.hw_max_sectors = 128;
dev->dev_attrib.hw_queue_depth = 128;
ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+ return udev->uio_info.uio_dev ? true : false;
+}
+
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
spin_unlock_irq(&udev->commands_lock);
WARN_ON(!all_expired);
- /* Device was configured */
- if (udev->uio_info.uio_dev) {
+ if (tcmu_dev_configured(udev)) {
tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
udev->uio_info.uio_dev->minor);
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
}
enum {
- Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+ Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+ Opt_err,
};
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%u"},
{Opt_err, NULL}
};
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+ unsigned long tmp_ul;
+ char *arg_p;
+ int ret;
+
+ arg_p = match_strdup(arg);
+ if (!arg_p)
+ return -ENOMEM;
+
+ ret = kstrtoul(arg_p, 0, &tmp_ul);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for dev attrib\n");
+ return ret;
+ }
+ if (!tmp_ul) {
+ pr_err("dev attrib must be nonzero\n");
+ return -EINVAL;
+ }
+ *dev_attrib = tmp_ul;
+ return 0;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
char *orig, *ptr, *opts, *arg_p;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
- unsigned long tmp_ul;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_err("kstrtoul() failed for dev_size=\n");
break;
case Opt_hw_block_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoul() failed for hw_block_size=\n");
- break;
- }
- if (!tmp_ul) {
- pr_err("hw_block_size must be nonzero\n");
- break;
- }
- dev->dev_attrib.hw_block_size = tmp_ul;
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_block_size));
+ break;
+ case Opt_hw_max_sectors:
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_max_sectors));
break;
default:
break;
}
+
+ if (ret)
+ break;
}
kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+ u32 val;
+ int ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!val) {
+ pr_err("Illegal value for cmd_time_out\n");
+ return -EINVAL;
+ }
+
+ udev->cmd_time_out = val * MSEC_PER_SEC;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
- .tb_dev_attrib_attrs = passthrough_attrib_attrs,
+ .tb_dev_attrib_attrs = NULL,
};
static int __init tcmu_module_init(void)
{
- int ret;
+ int ret, i, len = 0;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ len += sizeof(struct configfs_attribute *);
+ }
+ len += sizeof(struct configfs_attribute *) * 2;
+
+ tcmu_attrs = kzalloc(len, GFP_KERNEL);
+ if (!tcmu_attrs) {
+ ret = -ENOMEM;
+ goto out_unreg_genl;
+ }
+
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ tcmu_attrs[i] = passthrough_attrib_attrs[i];
+ }
+ tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+ tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
+
ret = transport_backend_register(&tcmu_ops);
if (ret)
- goto out_unreg_genl;
+ goto out_attrs;
return 0;
+out_attrs:
+ kfree(tcmu_attrs);
out_unreg_genl:
genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
@@ -1194,6 +1287,7 @@ static int __init tcmu_module_init(void)
static void __exit tcmu_module_exit(void)
{
target_backend_unregister(&tcmu_ops);
+ kfree(tcmu_attrs);
genl_unregister_family(&tcmu_genl_family);
root_device_unregister(tcmu_root_device);
kmem_cache_destroy(tcmu_cmd_cache);
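The tcmu_module_init() change builds its configfs attribute table by counting the NULL-terminated passthrough list, copying it, and appending one extra entry plus the terminator. The sketch below shows that array-building pattern in plain userspace C, with strings standing in for struct configfs_attribute pointers (the names are illustrative only).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* Stand-in for the NULL-terminated passthrough attribute list. */
	const char *base_attrs[] = { "hw_block_size", "hw_max_sectors", NULL };
	const char *extra = "cmd_time_out";
	size_t n = 0;

	while (base_attrs[n])          /* count the existing entries */
		n++;

	/* room for the copies, the extra attribute, and the terminator */
	const char **attrs = calloc(n + 2, sizeof(*attrs));
	if (!attrs)
		return 1;

	memcpy(attrs, base_attrs, n * sizeof(*attrs));
	attrs[n] = extra;              /* append the new attribute      */
	attrs[n + 1] = NULL;           /* keep the list NULL-terminated */

	for (size_t i = 0; attrs[i]; i++)
		printf("%s\n", attrs[i]);
	free(attrs);
	return 0;
}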
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6ee55a2..e65808c 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
{
unsigned int baud = tty_termios_baud_rate(termios);
struct dw8250_data *d = p->private_data;
- unsigned int rate;
+ long rate;
int ret;
if (IS_ERR(d->clk) || !old)
@@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, baud * 16);
- ret = clk_set_rate(d->clk, rate);
+ if (rate < 0)
+ ret = rate;
+ else if (rate == 0)
+ ret = -ENOENT;
+ else
+ ret = clk_set_rate(d->clk, rate);
clk_prepare_enable(d->clk);
if (!ret)
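The 8250_dw change treats the rounded rate as a signed long so negative values propagate as errors and zero maps to -ENOENT before any attempt to set the rate. A hedged userspace model of that caller pattern follows; round_rate() and set_baud() are stand-ins, not the clk API.

#include <stdio.h>
#include <errno.h>

/* Stand-in for clk_round_rate(): <0 is an error, 0 means "no usable rate". */
static long round_rate(long requested)
{
	if (requested < 0)
		return -EINVAL;
	if (requested == 0)
		return 0;
	return requested - (requested % 16);   /* pretend the clock rounds down */
}

static int set_baud(long baud)
{
	long rate = round_rate(baud * 16);
	int ret;

	if (rate < 0)
		ret = (int)rate;        /* propagate the error as-is        */
	else if (rate == 0)
		ret = -ENOENT;          /* nothing suitable was found       */
	else
		ret = 0;                /* clk_set_rate(rate) would go here */

	return ret;
}

int main(void)
{
	printf("baud 115200 -> %d\n", set_baud(115200));
	printf("baud 0      -> %d\n", set_baud(0));
	return 0;
}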
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8789ea423..56f92d7 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
if (strcmp(name, "qdf2400_e44") == 0) {
pr_info_once("UART: Working around QDF2400 SoC erratum 44");
qdf2400_e44_present = true;
- } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) {
+ } else if (strcmp(name, "pl011") != 0) {
return -ENODEV;
}
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index bcf1d33..c334bcc 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
pinctrl_select_state(ascport->pinctrl,
ascport->states[NO_HW_FLOWCTRL]);
- gpiod = devm_get_gpiod_from_child(port->dev, "rts",
- &np->fwnode);
- if (!IS_ERR(gpiod)) {
- gpiod_direction_output(gpiod, 0);
+ gpiod = devm_fwnode_get_gpiod_from_child(port->dev,
+ "rts",
+ &np->fwnode,
+ GPIOD_OUT_LOW,
+ np->name);
+ if (!IS_ERR(gpiod))
ascport->rts = gpiod;
- }
}
}
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 68947f6..b0500a0 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = {
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
+ struct tty_ldisc *ld;
+
ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
- if (!tty->ldisc)
+ ld = tty->ldisc;
+ if (!ld)
ldsem_up_read(&tty->ldisc_sem);
- return tty->ldisc;
+ return ld;
}
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
@@ -489,41 +492,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * tty_ldisc_restore - helper for tty ldisc change
- * @tty: tty to recover
- * @old: previous ldisc
- *
- * Restore the previous line discipline or N_TTY when a line discipline
- * change fails due to an open error
- */
-
-static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
-{
- struct tty_ldisc *new_ldisc;
- int r;
-
- /* There is an outstanding reference here so this is safe */
- old = tty_ldisc_get(tty, old->ops->num);
- WARN_ON(IS_ERR(old));
- tty->ldisc = old;
- tty_set_termios_ldisc(tty, old->ops->num);
- if (tty_ldisc_open(tty, old) < 0) {
- tty_ldisc_put(old);
- /* This driver is always present */
- new_ldisc = tty_ldisc_get(tty, N_TTY);
- if (IS_ERR(new_ldisc))
- panic("n_tty: get");
- tty->ldisc = new_ldisc;
- tty_set_termios_ldisc(tty, N_TTY);
- r = tty_ldisc_open(tty, new_ldisc);
- if (r < 0)
- panic("Couldn't open N_TTY ldisc for "
- "%s --- error %d.",
- tty_name(tty), r);
- }
-}
-
-/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
* @ldisc: the line discipline
@@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
int tty_set_ldisc(struct tty_struct *tty, int disc)
{
- int retval;
- struct tty_ldisc *old_ldisc, *new_ldisc;
-
- new_ldisc = tty_ldisc_get(tty, disc);
- if (IS_ERR(new_ldisc))
- return PTR_ERR(new_ldisc);
+ int retval, old_disc;
tty_lock(tty);
retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
}
/* Check the no-op case */
- if (tty->ldisc->ops->num == disc)
+ old_disc = tty->ldisc->ops->num;
+ if (old_disc == disc)
goto out;
if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
goto out;
}
- old_ldisc = tty->ldisc;
-
- /* Shutdown the old discipline. */
- tty_ldisc_close(tty, old_ldisc);
-
- /* Now set up the new line discipline. */
- tty->ldisc = new_ldisc;
- tty_set_termios_ldisc(tty, disc);
-
- retval = tty_ldisc_open(tty, new_ldisc);
+ retval = tty_ldisc_reinit(tty, disc);
if (retval < 0) {
/* Back to the old one or N_TTY if we can't */
- tty_ldisc_put(new_ldisc);
- tty_ldisc_restore(tty, old_ldisc);
+ if (tty_ldisc_reinit(tty, old_disc) < 0) {
+ pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
+ if (tty_ldisc_reinit(tty, N_TTY) < 0) {
+ /* At this point we have tty->ldisc == NULL. */
+ pr_err("tty: reinitializing N_TTY failed\n");
+ }
+ }
}
- if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
+ if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
+ tty->ops->set_ldisc) {
down_read(&tty->termios_rwsem);
tty->ops->set_ldisc(tty);
up_read(&tty->termios_rwsem);
}
- /* At this point we hold a reference to the new ldisc and a
- reference to the old ldisc, or we hold two references to
- the old ldisc (if it was restored as part of error cleanup
- above). In either case, releasing a single reference from
- the old ldisc is correct. */
- new_ldisc = old_ldisc;
out:
tty_ldisc_unlock(tty);
@@ -598,7 +553,6 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
already running */
tty_buffer_restart_work(tty->port);
err:
- tty_ldisc_put(new_ldisc); /* drop the extra reference */
tty_unlock(tty);
return retval;
}
@@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
int retval;
ld = tty_ldisc_get(tty, disc);
- if (IS_ERR(ld)) {
- BUG_ON(disc == N_TTY);
+ if (IS_ERR(ld))
return PTR_ERR(ld);
- }
if (tty->ldisc) {
tty_ldisc_close(tty, tty->ldisc);
@@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, tty->ldisc);
if (retval) {
- if (!WARN_ON(disc == N_TTY)) {
- tty_ldisc_put(tty->ldisc);
- tty->ldisc = NULL;
- }
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
}
return retval;
}
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index f03692e..8fb309a 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf,
break;
}
}
+
+ if (!data->bulk_out || !data->bulk_in) {
+ dev_err(&intf->dev, "bulk endpoints not found\n");
+ retcode = -ENODEV;
+ goto err_put;
+ }
+
/* Find int endpoint */
for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
endpoint = &iface_desc->endpoint[n].desc;
@@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf,
if (data->iin_ep_present) {
/* allocate int urb */
data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!data->iin_urb)
+ if (!data->iin_urb) {
+ retcode = -ENOMEM;
goto error_register;
+ }
/* Protect interrupt in endpoint data until iin_urb is freed */
kref_get(&data->kref);
@@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf,
/* allocate buffer for interrupt in */
data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
GFP_KERNEL);
- if (!data->iin_buffer)
+ if (!data->iin_buffer) {
+ retcode = -ENOMEM;
goto error_register;
+ }
/* fill interrupt urb */
usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1512,6 +1523,7 @@ static int usbtmc_probe(struct usb_interface *intf,
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
usbtmc_free_int(data);
+err_put:
kref_put(&data->kref, usbtmc_delete);
return retcode;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 25dbd8c..4be52c6 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
/*
* Adjust bInterval for quirked devices.
+ */
+ /*
+ * This quirk fixes bIntervals reported in ms.
+ */
+ if (to_usb_device(ddev)->quirks &
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+ n = clamp(fls(d->bInterval) + 3, i, j);
+ i = j = n;
+ }
+ /*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
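The usb/core/config.c quirk converts a bInterval that the device reported in milliseconds (frames) into the 2^(n-1)-microframe exponent the descriptor is supposed to carry: one frame is 8 microframes, so the exponent becomes fls(ms) + 3, clamped to the legal range. A small userspace check of that arithmetic, with fls() re-implemented locally and an assumed clamp range:

#include <stdio.h>

/* Local fls(): index of the highest set bit, 1-based; 0 for x == 0. */
static int fls_local(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int lo = 1, hi = 16;   /* assumed legal exponent range for HS interrupt EPs */

	for (unsigned int ms = 1; ms <= 16; ms *= 2) {
		int n = clamp_int(fls_local(ms) + 3, lo, hi);
		printf("bInterval %2u ms -> exponent %2d -> %4d microframes (%g ms)\n",
		       ms, n, 1 << (n - 1), (1 << (n - 1)) / 8.0);
	}
	return 0;
}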
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f0dd081..5286bf6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
- if (!udev->usb2_hw_lpm_capable)
+ if (!udev->usb2_hw_lpm_capable || !udev->bos)
return;
if (hub)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98..96b21b0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Baum Vario Ultra */
+ { USB_DEVICE(0x0904, 0x6101), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6102), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6103), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0d75158..79e7a34 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
+ unsigned int unmap_after_complete = false;
req->started = false;
list_del(&req->list);
@@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number <= 1)
+ /*
+ * NOTICE we don't want to unmap before calling ->complete() if we're
+ * dealing with a bounced ep0 request. If we unmap it here, we would end
+ * up overwriting the contents of req->buf and this could confuse the
+ * gadget driver.
+ */
+ if (dwc->ep0_bounced && dep->number <= 1) {
dwc->ep0_bounced = false;
-
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
+ unmap_after_complete = true;
+ } else {
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+ }
trace_dwc3_gadget_giveback(req);
@@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
+ if (unmap_after_complete)
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
+
if (dep->number > 1)
pm_runtime_put(dwc->dev);
}
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index a30766c..5e3828d 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm)
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
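The f_acm fix converts serial_state to little-endian in a local variable before handing it to the notify call, instead of passing the CPU-order field directly. The byte-order concern can be shown in isolation; the sketch below uses glibc's htole16() from <endian.h> as a userspace analogue of cpu_to_le16(), and the example value is arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>

int main(void)
{
	uint16_t serial_state = 0x0003;          /* example value, CPU byte order */
	uint16_t wire = htole16(serial_state);   /* what goes on the USB wire     */
	uint8_t buf[2];

	memcpy(buf, &wire, sizeof(wire));
	/* Regardless of host endianness, the wire format is LSB first. */
	printf("wire bytes: %02x %02x\n", buf[0], buf[1]);   /* 03 00 */
	return 0;
}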
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 89b48bc..5eea448 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -367,7 +367,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
count = min_t(unsigned, count, hidg->report_length);
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
- status = copy_from_user(hidg->req->buf, buffer, count);
+ status = copy_from_user(req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
@@ -378,9 +378,9 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
spin_lock_irqsave(&hidg->write_spinlock, flags);
- /* we our function has been disabled by host */
+ /* when our function has been disabled by host */
if (!hidg->req) {
- free_ep_req(hidg->in_ep, hidg->req);
+ free_ep_req(hidg->in_ep, req);
/*
* TODO
* Should we fail with error here?
@@ -394,7 +394,7 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
req->complete = f_hidg_req_complete;
req->context = hidg;
- status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
+ status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5..f8a1881 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
+ /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
+ if (opts->streaming_maxburst &&
+ (opts->streaming_maxpacket % 1024) != 0) {
+ opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
+ INFO(cdev, "overriding streaming_maxpacket to %d\n",
+ opts->streaming_maxpacket);
+ }
+
/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
* module parameters.
*
@@ -625,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
uvc_ss_streaming_comp.wBytesPerInterval =
cpu_to_le16(max_packet_size * max_packet_mult *
- opts->streaming_maxburst);
+ (opts->streaming_maxburst + 1));
/* Allocate endpoints. */
ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
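Two arithmetic points in the f_uvc hunk: with a non-zero bMaxBurst the streaming maxpacket is rounded up to a multiple of 1024, and the SS companion descriptor's wBytesPerInterval counts bMaxBurst + 1 packets per interval. A quick userspace check of both calculations, with illustrative values:

#include <stdio.h>

static unsigned int roundup_to(unsigned int x, unsigned int to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	unsigned int maxpacket = 3000, maxburst = 2, mult = 1;

	/* With burst enabled, the packet size must be a multiple of 1024. */
	if (maxburst && (maxpacket % 1024) != 0)
		maxpacket = roundup_to(maxpacket, 1024);

	/* bMaxBurst is "additional packets", so a burst of 2 means 3 packets. */
	unsigned int bytes_per_interval = maxpacket * mult * (maxburst + 1);

	printf("maxpacket=%u bytes_per_interval=%u\n",
	       maxpacket, bytes_per_interval);   /* 3072 and 9216 */
	return 0;
}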
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index a97da64..8a365aa 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
pci_pool_free(dev->data_requests, td, addr);
- td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 8b9fd75..502bfe3 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface,
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 7717651..d3d1247 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf,
hdev = interface_to_usbdev(intf);
desc = intf->cur_altsetting;
+
+ if (desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &desc->endpoint[0].desc;
/* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index e45a3a6..07014ca 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf,
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 3) {
+ usb_put_dev(usbdev);
+ return -ENODEV;
+ }
+
/*
* Allocate parport interface
*/
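Several of the probe fixes above and below share the same shape: check cur_altsetting->desc.bNumEndpoints before dereferencing endpoint[n], since a malicious or malformed device can present fewer endpoints than the driver expects. A minimal userspace model of that bounds check (the structures are simplified stand-ins, not the USB core types):

#include <stdio.h>

struct ep_desc { int address; };
struct alt_setting {
	int num_endpoints;
	struct ep_desc endpoint[8];
};

/* Return a pointer to endpoint 'n', or NULL if the device doesn't have it. */
static struct ep_desc *get_endpoint(struct alt_setting *alt, int n)
{
	if (n >= alt->num_endpoints)
		return NULL;      /* probe would bail out with -ENODEV here */
	return &alt->endpoint[n];
}

int main(void)
{
	struct alt_setting alt = { .num_endpoints = 1,
				   .endpoint = { { .address = 0x81 } } };

	printf("ep0: %p\n", (void *)get_endpoint(&alt, 0));
	printf("ep2: %p\n", (void *)get_endpoint(&alt, 2));  /* NULL: rejected */
	return 0;
}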
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index d8bae6c..0c3664ab 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev)
musb_host_cleanup(musb);
musb_gadget_cleanup(musb);
- spin_lock_irqsave(&musb->lock, flags);
musb_platform_disable(musb);
+ spin_lock_irqsave(&musb->lock, flags);
musb_disable_interrupts(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 00e272b..355655f8 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data,
transferred < cppi41_channel->packet_sz)
cppi41_channel->prog_len = 0;
- if (cppi41_channel->is_tx)
- empty = musb_is_tx_fifo_empty(hw_ep);
+ if (cppi41_channel->is_tx) {
+ u8 type;
+
+ if (is_host_active(musb))
+ type = hw_ep->out_qh->type;
+ else
+ type = hw_ep->ep_in.type;
+
+ if (type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * Don't use the early-TX-interrupt workaround below
+ * for isochronous transfers. Since isochronous transfers
+ * are periodic, by the time the next one is scheduled,
+ * the current one should already be done.
+ *
+ * This avoids audio playback underruns.
+ */
+ empty = true;
+ else
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ }
if (!cppi41_channel->is_tx || empty) {
cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 7c047c4..9c7ee26 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev)
if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) {
ret = dsps_setup_optional_vbus_irq(pdev, glue);
if (ret)
- return ret;
+ goto err_iounmap;
}
platform_set_drvdata(pdev, glue);
@@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev)
err:
pm_runtime_disable(&pdev->dev);
+err_iounmap:
+ iounmap(glue->usbss_base);
return ret;
}
@@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev)
platform_device_unregister(glue->musb);
pm_runtime_disable(&pdev->dev);
+ iounmap(glue->usbss_base);
return 0;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e..af67a0d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb);
#define BANDRICH_PRODUCT_1012 0x1012
#define QUALCOMM_VENDOR_ID 0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20 0x9003
+#define QUECTEL_PRODUCT_UC15 0x9090
+
+#define QUECTEL_VENDOR_ID 0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EC25 0x0125
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ /* Quectel products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Quectel products using Quectel vendor ID */
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458d..38b3f0d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd..d01496f 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface,
int result;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0aa6c3c..35a1e77 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface,
struct hwarc *hwarc;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846..6345e85 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
result);
}
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 609f4f9..561084a 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref)
struct iommu_group *iommu_group = group->iommu_group;
WARN_ON(!list_empty(&group->device_list));
+ WARN_ON(group->notifier.head);
list_for_each_entry_safe(unbound, tmp,
&group->unbound_list, unbound_next) {
@@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
return -EBUSY;
}
+ /* Warn if the previous user didn't clean up, and re-init to drop stale notifiers */
+ if (WARN_ON(group->notifier.head))
+ BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+
filep->private_data = group;
return 0;
@@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
filep->private_data = NULL;
- /* Any user didn't unregister? */
- WARN_ON(group->notifier.head);
-
vfio_group_try_dissolve_container(group);
atomic_dec(&group->opened);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c26fa1f..32d2633 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL;
}
-static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
- phys_addr_t *base)
+static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
{
struct list_head group_resv_regions;
struct iommu_resv_region *region, *next;
@@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
INIT_LIST_HEAD(&group_resv_regions);
iommu_get_group_resv_regions(group, &group_resv_regions);
list_for_each_entry(region, &group_resv_regions, list) {
- if (region->type & IOMMU_RESV_MSI) {
+ if (region->type == IOMMU_RESV_SW_MSI) {
*base = region->start;
ret = true;
goto out;
@@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_domain;
- resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+ resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
INIT_LIST_HEAD(&domain->group_list);
list_add(&group->next, &domain->group_list);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index ce5e63d..44eed8e 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return len;
}
+static int
+vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ struct vhost_vsock *vsock;
+ struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0;
+ LIST_HEAD(freeme);
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ if (!vsock)
+ return -ENODEV;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+ if (pkt->vsk != vsk)
+ continue;
+ list_move(&pkt->list, &freeme);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ list_for_each_entry_safe(pkt, n, &freeme, list) {
+ if (pkt->reply)
+ cnt++;
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+
+ if (cnt) {
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int new_cnt;
+
+ new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+ if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
+ vhost_poll_queue(&tx_vq->poll);
+ }
+
+ return 0;
+}
+
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
unsigned int out, unsigned int in)
@@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = {
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
+ .cancel_pkt = vhost_transport_cancel_pkt,
.dgram_enqueue = virtio_transport_dgram_enqueue,
.dgram_dequeue = virtio_transport_dgram_dequeue,
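vhost_transport_cancel_pkt() above detaches the matching packets onto a private list while holding the spinlock and frees them only after dropping it. The userspace sketch below shows that two-phase pattern with a plain singly linked list (the struct and field names are stand-ins, and the lock itself is elided):

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int owner;            /* stands in for pkt->vsk   */
	int reply;            /* stands in for pkt->reply */
	struct pkt *next;
};

/* Move every packet owned by 'owner' from *head onto a private list and
 * free it there; splitting the work keeps the free path outside the lock.
 * Returns how many reply packets were dropped. */
static int cancel_pkts(struct pkt **head, int owner)
{
	struct pkt *freeme = NULL, **pp = head;
	int replies = 0;

	while (*pp) {                       /* phase 1: detach under the lock */
		struct pkt *p = *pp;
		if (p->owner == owner) {
			*pp = p->next;
			p->next = freeme;
			freeme = p;
		} else {
			pp = &p->next;
		}
	}

	while (freeme) {                    /* phase 2: free after unlocking  */
		struct pkt *p = freeme;
		freeme = p->next;
		replies += p->reply;
		free(p);
	}
	return replies;
}

int main(void)
{
	struct pkt *head = NULL;
	for (int i = 0; i < 4; i++) {
		struct pkt *p = calloc(1, sizeof(*p));
		if (!p)
			return 1;
		p->owner = i % 2;
		p->reply = 1;
		p->next = head;
		head = p;
	}
	printf("cancelled replies for owner 1: %d\n", cancel_pkts(&head, 1));
	return 0;
}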
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 4e119150..34adf9b 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
-static void update_balloon_stats(struct virtio_balloon *vb)
+static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
unsigned long events[NR_VM_EVENT_ITEMS];
struct sysinfo i;
- int idx = 0;
+ unsigned int idx = 0;
long available;
all_vm_events(events);
@@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb)
available = si_mem_available();
+#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
pages_to_bytes(i.totalram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
pages_to_bytes(available));
+
+ return idx;
}
/*
@@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
{
struct virtqueue *vq;
struct scatterlist sg;
- unsigned int len;
+ unsigned int len, num_stats;
- update_balloon_stats(vb);
+ num_stats = update_balloon_stats(vb);
vq = vb->stats_vq;
if (!virtqueue_get_buf(vq, &len))
return;
- sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+ sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);
}
@@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb)
vb->deflate_vq = vqs[1];
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
struct scatterlist sg;
+ unsigned int num_stats;
vb->stats_vq = vqs[2];
/*
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later (it can't be broken yet!).
*/
- sg_init_one(&sg, vb->stats, sizeof vb->stats);
+ num_stats = update_balloon_stats(vb);
+
+ sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
BUG();
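update_balloon_stats() now returns the number of entries it actually filled, and the scatterlist exposed to the host is sized from that count rather than from the whole array, so a kernel without CONFIG_VM_EVENT_COUNTERS does not publish uninitialised slots. The sizing idea in miniature, in userspace, with a plain struct standing in for vb->stats:

#include <stdio.h>
#include <stdint.h>

struct stat_entry { uint16_t tag; uint64_t val; };

/* Fill as many entries as this build supports and return how many. */
static unsigned int update_stats(struct stat_entry *s, int have_vm_events)
{
	unsigned int idx = 0;

	if (have_vm_events) {
		s[idx++] = (struct stat_entry){ .tag = 0, .val = 111 }; /* swap in  */
		s[idx++] = (struct stat_entry){ .tag = 1, .val = 222 }; /* swap out */
	}
	s[idx++] = (struct stat_entry){ .tag = 4, .val = 333 };         /* memfree  */
	s[idx++] = (struct stat_entry){ .tag = 5, .val = 444 };         /* memtot   */
	return idx;
}

int main(void)
{
	struct stat_entry stats[8];
	unsigned int n = update_stats(stats, 0);

	/* Only n * sizeof(stats[0]) bytes would be handed to the device. */
	printf("publishing %u entries, %zu bytes\n", n, n * sizeof(stats[0]));
	return 0;
}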
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index df548a6..5905349 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -147,7 +147,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
- int i, err = -ENOMEM, allocated_vectors, nvectors;
+ int i, j, err = -ENOMEM, allocated_vectors, nvectors;
unsigned flags = PCI_IRQ_MSIX;
bool shared = false;
u16 msix_vec;
@@ -212,7 +212,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
if (!vp_dev->msix_vector_map)
goto out_disable_config_irq;
- allocated_vectors = 1; /* vector 0 is the config interrupt */
+ allocated_vectors = j = 1; /* vector 0 is the config interrupt */
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
vqs[i] = NULL;
@@ -236,18 +236,19 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
continue;
}
- snprintf(vp_dev->msix_names[i + 1],
+ snprintf(vp_dev->msix_names[j],
sizeof(*vp_dev->msix_names), "%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
vring_interrupt, IRQF_SHARED,
- vp_dev->msix_names[i + 1], vqs[i]);
+ vp_dev->msix_names[j], vqs[i]);
if (err) {
/* don't free this irq on error */
vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
goto out_remove_vqs;
}
vp_dev->msix_vector_map[i] = msix_vec;
+ j++;
/*
* Use a different vector for each queue if they are available,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index c77a075..f3bf8f4 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
+#include <linux/refcount.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
int index;
int count;
int flags;
- atomic_t users;
+ refcount_t users;
struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->index = 0;
add->count = count;
- atomic_set(&add->users, 1);
+ refcount_set(&add->users, 1);
return add;
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
if (!map)
return;
- if (!atomic_dec_and_test(&map->users))
+ if (!refcount_dec_and_test(&map->users))
return;
atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("gntdev_vma_open %p\n", vma);
- atomic_inc(&map->users);
+ refcount_inc(&map->users);
}
static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
}
- atomic_inc(&map->users);
+ refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
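The gntdev conversion swaps a bare atomic_t for refcount_t so that the usage counter saturates instead of wrapping on overflow or underflow. The get/put shape of the API can be sketched in userspace with C11 atomics; this mirrors the calling pattern only, not the kernel's saturation semantics, and the names are stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct grant_map {
	atomic_int users;
	int index;
};

static struct grant_map *map_alloc(void)
{
	struct grant_map *m = calloc(1, sizeof(*m));
	if (m)
		atomic_store(&m->users, 1);    /* like refcount_set(&m->users, 1) */
	return m;
}

static void map_get(struct grant_map *m)
{
	atomic_fetch_add(&m->users, 1);        /* like refcount_inc() */
}

static void map_put(struct grant_map *m)
{
	/* like refcount_dec_and_test(): free only when the last reference drops */
	if (atomic_fetch_sub(&m->users, 1) == 1) {
		printf("freeing map %d\n", m->index);
		free(m);
	}
}

int main(void)
{
	struct grant_map *m = map_alloc();
	if (!m)
		return 1;
	map_get(m);    /* e.g. a second vma opened on the mapping */
	map_put(m);
	map_put(m);    /* last put frees */
	return 0;
}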
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4ce10bc..23e391d 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <xen/xen.h>
-#include <xen/xen-ops.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup)
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
read_acpi_id, NULL, NULL, NULL);
- acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL);
upload:
if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
@@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void)
return rc;
}
-static int xen_acpi_processor_resume(struct notifier_block *nb,
- unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
{
+ int rc;
+
bitmap_zero(acpi_ids_done, nr_acpi_bits);
- return xen_upload_processor_pm_data();
+
+ rc = xen_upload_processor_pm_data();
+ if (rc != 0)
+ pr_info("ACPI data upload failed, error = %d\n", rc);
}
-struct notifier_block xen_acpi_processor_resume_nb = {
- .notifier_call = xen_acpi_processor_resume,
+static void xen_acpi_processor_resume(void)
+{
+ static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+ /*
+ * xen_upload_processor_pm_data() calls non-atomic code.
+ * However, the context for xen_acpi_processor_resume is syscore
+ * with only the boot CPU online and in an atomic context.
+ *
+ * So defer the upload to a safer point.
+ */
+ schedule_work(&wq);
+}
+
+static struct syscore_ops xap_syscore_ops = {
+ .resume = xen_acpi_processor_resume,
};
static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void)
if (rc)
goto err_unregister;
- xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+ register_syscore_ops(&xap_syscore_ops);
return 0;
err_unregister:
@@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void)
{
int i;
- xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+ unregister_syscore_ops(&xap_syscore_ops);
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index b29447e..25d404d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
{
struct afs_server *server;
struct afs_vnode *vnode, *xvnode;
- time_t now;
+ time64_t now;
long timeout;
int ret;
@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
_enter("");
- now = get_seconds();
+ now = ktime_get_real_seconds();
/* find the first vnode to update */
spin_lock(&server->cb_lock);
@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
/* and then reschedule */
_debug("reschedule");
- vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+ vnode->update_at = ktime_get_real_seconds() +
+ afs_vnode_update_timeout;
spin_lock(&server->cb_lock);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2edbdcb..3062cce 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
struct afs_callback *cb;
struct afs_server *server;
__be32 *bp;
- u32 tmp;
int ret, loop;
_enter("{%u}", call->unmarshall);
@@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (ret < 0)
return ret;
- tmp = ntohl(call->tmp);
- _debug("CB count: %u", tmp);
- if (tmp != call->count && tmp != 0)
+ call->count2 = ntohl(call->tmp);
+ _debug("CB count: %u", call->count2);
+ if (call->count2 != call->count && call->count2 != 0)
return -EBADMSG;
call->offset = 0;
call->unmarshall++;
@@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, call->buffer,
- call->count * 3 * 4, false);
+ call->count2 * 3 * 4, false);
if (ret < 0)
return ret;
_debug("unmarshall CB array");
cb = call->request;
bp = call->buffer;
- for (loop = call->count; loop > 0; loop--, cb++) {
+ for (loop = call->count2; loop > 0; loop--, cb++) {
cb->version = ntohl(*bp++);
cb->expiry = ntohl(*bp++);
cb->type = ntohl(*bp++);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ba7b71f..0d5b850 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
const struct file_operations afs_file_operations = {
.open = afs_open,
+ .flush = afs_flush,
.release = afs_release,
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
@@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page)
if (!req)
goto enomem;
+ /* We request a full page. If the page is a partial one at the
+ * end of the file, the server will return a short read and the
+ * unmarshalling code will clear the unfilled space.
+ */
atomic_set(&req->usage, 1);
req->pos = (loff_t)page->index << PAGE_SHIFT;
- req->len = min_t(size_t, i_size_read(inode) - req->pos,
- PAGE_SIZE);
+ req->len = PAGE_SIZE;
req->nr_pages = 1;
req->pages[0] = page;
get_page(page);
@@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page)
fscache_uncache_page(vnode->cache, page);
#endif
BUG_ON(PageFsCache(page));
- goto error;
+
+ if (ret == -EINTR ||
+ ret == -ENOMEM ||
+ ret == -ERESTARTSYS ||
+ ret == -EAGAIN)
+ goto error;
+ goto io_error;
}
SetPageUptodate(page);
@@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page)
_leave(" = 0");
return 0;
+io_error:
+ SetPageError(page);
+ goto error;
enomem:
ret = -ENOMEM;
error:
- SetPageError(page);
unlock_page(page);
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index ac8e766..19f76ae 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -17,6 +17,12 @@
#include "afs_fs.h"
/*
+ * We need somewhere to discard into in case the server helpfully returns more
+ * than we asked for in FS.FetchData{,64}.
+ */
+static u8 afs_discard_buffer[64];
+
+/*
* decode an AFSFid block
*/
static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
@@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
vnode->vfs_inode.i_mode = mode;
}
- vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
+ vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_version = data_version;
@@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
vnode->cb_version = ntohl(*bp++);
vnode->cb_expiry = ntohl(*bp++);
vnode->cb_type = ntohl(*bp++);
- vnode->cb_expires = vnode->cb_expiry + get_seconds();
+ vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds();
*_bp = bp;
}
@@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
void *buffer;
int ret;
- _enter("{%u,%zu/%u;%u/%llu}",
+ _enter("{%u,%zu/%u;%llu/%llu}",
call->unmarshall, call->offset, call->count,
req->remain, req->actual_len);
@@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
req->actual_len |= ntohl(call->tmp);
_debug("DATA length: %llu", req->actual_len);
- /* Check that the server didn't want to send us extra. We
- * might want to just discard instead, but that requires
- * cooperation from AF_RXRPC.
- */
- if (req->actual_len > req->len)
- return -EBADMSG;
req->remain = req->actual_len;
call->offset = req->pos & (PAGE_SIZE - 1);
@@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->unmarshall++;
begin_page:
+ ASSERTCMP(req->index, <, req->nr_pages);
if (req->remain > PAGE_SIZE - call->offset)
size = PAGE_SIZE - call->offset;
else
@@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
/* extract the returned data */
case 3:
- _debug("extract data %u/%llu %zu/%u",
+ _debug("extract data %llu/%llu %zu/%u",
req->remain, req->actual_len, call->offset, call->count);
buffer = kmap(req->pages[req->index]);
@@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (call->offset == PAGE_SIZE) {
if (req->page_done)
req->page_done(call, req);
+ req->index++;
if (req->remain > 0) {
- req->index++;
call->offset = 0;
+ if (req->index >= req->nr_pages) {
+ call->unmarshall = 4;
+ goto begin_discard;
+ }
goto begin_page;
}
}
+ goto no_more_data;
+
+ /* Discard any excess data the server gave us */
+ begin_discard:
+ case 4:
+ size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
+ call->count = size;
+ _debug("extract discard %llu/%llu %zu/%u",
+ req->remain, req->actual_len, call->offset, call->count);
+
+ call->offset = 0;
+ ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
+ req->remain -= call->offset;
+ if (ret < 0)
+ return ret;
+ if (req->remain > 0)
+ goto begin_discard;
no_more_data:
call->offset = 0;
- call->unmarshall++;
+ call->unmarshall = 5;
/* extract the metadata */
- case 4:
+ case 5:
ret = afs_extract_data(call, call->buffer,
(21 + 3 + 6) * 4, false);
if (ret < 0)
@@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->offset = 0;
call->unmarshall++;
- case 5:
+ case 6:
break;
}
- if (call->count < PAGE_SIZE) {
- buffer = kmap(req->pages[req->index]);
- memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap(req->pages[req->index]);
+ for (; req->index < req->nr_pages; req->index++) {
+ if (call->count < PAGE_SIZE)
+ zero_user_segment(req->pages[req->index],
+ call->count, PAGE_SIZE);
if (req->page_done)
req->page_done(call, req);
+ call->count = 0;
}
_leave(" = 0 [done]");
@@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server,
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
- *bp++ = htonl(AFS_SET_MODE);
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
@@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server,
memset(bp, 0, c_padsz);
bp = (void *) bp + c_padsz;
}
- *bp++ = htonl(AFS_SET_MODE);
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(S_IRWXUGO); /* unix mode */
@@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server,
*bp++ = htonl(vnode->fid.vnode);
*bp++ = htonl(vnode->fid.unique);
- *bp++ = 0; /* mask */
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MTIME); /* mask */
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
@@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
_enter(",%x,{%x:%u},,",
key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
- size = to - offset;
+ size = (loff_t)to - (loff_t)offset;
if (first != last)
size += (loff_t)(last - first) << PAGE_SHIFT;
pos = (loff_t)first << PAGE_SHIFT;
@@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
*bp++ = htonl(vnode->fid.vnode);
*bp++ = htonl(vnode->fid.unique);
- *bp++ = 0; /* mask */
- *bp++ = 0; /* mtime */
+ *bp++ = htonl(AFS_SET_MTIME); /* mask */
+ *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
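
Note on the FS.FetchData delivery changes above: the reply is no longer rejected with -EBADMSG when the server returns more data than requested; once every page in the request is filled, unmarshalling switches to state 4 and drains the excess into the small static afs_discard_buffer. The same drain-and-discard loop as a stand-alone user-space sketch reading from an ordinary file descriptor (function and variable names are illustrative):

#include <stdint.h>
#include <unistd.h>

static char discard_buffer[64];

/* Read and throw away 'remain' excess bytes from fd; 0 on success, -1 on error. */
static int drain_excess(int fd, uint64_t remain)
{
	while (remain > 0) {
		size_t chunk = remain < sizeof(discard_buffer) ?
				(size_t)remain : sizeof(discard_buffer);
		ssize_t n = read(fd, discard_buffer, chunk);

		if (n <= 0)
			return -1;
		remain -= (uint64_t)n;
	}
	return 0;
}
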
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1e4897a..aae55dd 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_fop = &afs_dir_file_operations;
break;
case AFS_FTYPE_SYMLINK:
- inode->i_mode = S_IFLNK | vnode->status.mode;
- inode->i_op = &page_symlink_inode_operations;
+ /* Symlinks with a mode of 0644 are actually mountpoints. */
+ if ((vnode->status.mode & 0777) == 0644) {
+ inode->i_flags |= S_AUTOMOUNT;
+
+ spin_lock(&vnode->lock);
+ set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+ spin_unlock(&vnode->lock);
+
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_mntpt_inode_operations;
+ inode->i_fop = &afs_mntpt_file_operations;
+ } else {
+ inode->i_mode = S_IFLNK | vnode->status.mode;
+ inode->i_op = &page_symlink_inode_operations;
+ }
inode_nohighmem(inode);
break;
default:
@@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
set_nlink(inode, vnode->status.nlink);
inode->i_uid = vnode->status.owner;
- inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_gid = vnode->status.group;
inode->i_size = vnode->status.size;
- inode->i_ctime.tv_sec = vnode->status.mtime_server;
+ inode->i_ctime.tv_sec = vnode->status.mtime_client;
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_generation = vnode->fid.unique;
inode->i_version = vnode->status.data_version;
inode->i_mapping->a_ops = &afs_fs_aops;
-
- /* check to see whether a symbolic link is really a mountpoint */
- if (vnode->status.type == AFS_FTYPE_SYMLINK) {
- afs_mntpt_check_symlink(vnode, key);
-
- if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
- inode->i_mode = S_IFDIR | vnode->status.mode;
- inode->i_op = &afs_mntpt_inode_operations;
- inode->i_fop = &afs_mntpt_file_operations;
- }
- }
-
return 0;
}
@@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
vnode->cb_version = 0;
vnode->cb_expiry = 0;
vnode->cb_type = 0;
- vnode->cb_expires = get_seconds();
+ vnode->cb_expires = ktime_get_real_seconds();
} else {
vnode->cb_version = cb->version;
vnode->cb_expiry = cb->expiry;
vnode->cb_type = cb->type;
- vnode->cb_expires = vnode->cb_expiry + get_seconds();
+ vnode->cb_expires = vnode->cb_expiry +
+ ktime_get_real_seconds();
}
}
@@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
!test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
- if (vnode->cb_expires < get_seconds() + 10) {
+ if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
_debug("callback expired");
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
} else {
@@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode)
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
- rcu_assign_pointer(vnode->permits, NULL);
+ RCU_INIT_POINTER(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
call_rcu(&permits->rcu, afs_zap_permits);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5dfa569..a690136 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/rxrpc.h>
@@ -90,7 +91,10 @@ struct afs_call {
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
unsigned first_offset; /* offset into mapping[first] */
- unsigned last_to; /* amount of mapping[last] */
+ union {
+ unsigned last_to; /* amount of mapping[last] */
+ unsigned count2; /* count used in unmarshalling */
+ };
unsigned char unmarshall; /* unmarshalling phase */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
@@ -127,12 +131,11 @@ struct afs_call_type {
*/
struct afs_read {
loff_t pos; /* Where to start reading */
- loff_t len; /* How much to read */
+ loff_t len; /* How much we're asking for */
loff_t actual_len; /* How much we're actually getting */
+ loff_t remain; /* Amount remaining */
atomic_t usage;
- unsigned int remain; /* Amount remaining */
unsigned int index; /* Which page we're reading into */
- unsigned int pg_offset; /* Offset in page we're at */
unsigned int nr_pages;
void (*page_done)(struct afs_call *, struct afs_read *);
struct page *pages[];
@@ -247,7 +250,7 @@ struct afs_cache_vhash {
*/
struct afs_vlocation {
atomic_t usage;
- time_t time_of_death; /* time at which put reduced usage to 0 */
+ time64_t time_of_death; /* time at which put reduced usage to 0 */
struct list_head link; /* link in cell volume location list */
struct list_head grave; /* link in master graveyard list */
struct list_head update; /* link in master update list */
@@ -258,7 +261,7 @@ struct afs_vlocation {
struct afs_cache_vlocation vldb; /* volume information DB record */
struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
wait_queue_head_t waitq; /* status change waitqueue */
- time_t update_at; /* time at which record should be updated */
+ time64_t update_at; /* time at which record should be updated */
spinlock_t lock; /* access lock */
afs_vlocation_state_t state; /* volume location state */
unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
@@ -271,7 +274,7 @@ struct afs_vlocation {
*/
struct afs_server {
atomic_t usage;
- time_t time_of_death; /* time at which put reduced usage to 0 */
+ time64_t time_of_death; /* time at which put reduced usage to 0 */
struct in_addr addr; /* server address */
struct afs_cell *cell; /* cell in which server resides */
struct list_head link; /* link in cell's server list */
@@ -374,8 +377,8 @@ struct afs_vnode {
struct rb_node server_rb; /* link in server->fs_vnodes */
struct rb_node cb_promise; /* link in server->cb_promises */
struct work_struct cb_broken_work; /* work to be done on callback break */
- time_t cb_expires; /* time at which callback expires */
- time_t cb_expires_at; /* time used to order cb_promise */
+ time64_t cb_expires; /* time at which callback expires */
+ time64_t cb_expires_at; /* time used to order cb_promise */
unsigned cb_version; /* callback version */
unsigned cb_expiry; /* callback expiry time */
afs_callback_type_t cb_type; /* type of callback */
@@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations;
extern const struct file_operations afs_mntpt_file_operations;
extern struct vfsmount *afs_d_automount(struct path *);
-extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
extern void afs_mntpt_kill_timer(void);
/*
@@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_writeback_all(struct afs_vnode *);
+extern int afs_flush(struct file *, fl_owner_t);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
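
Note on the internal.h changes above: cb_expires, time_of_death, update_at and friends move from time_t to time64_t, paired with the get_seconds() to ktime_get_real_seconds() switch elsewhere in the series, so expiry arithmetic stays correct past January 2038 on 32-bit kernels. A tiny stand-alone demonstration of why the width matters (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A callback granted ten seconds before the 32-bit time_t rollover. */
	int64_t now = (int64_t)INT32_MAX - 10;

	/* Truncating to 32 bits wraps the expiry into the distant past... */
	int32_t expires32 = (int32_t)(uint32_t)(now + 600);
	/* ...while 64-bit seconds stay monotonic well past 2038. */
	int64_t expires64 = now + 600;

	printf("32-bit expiry: %d\n", (int)expires32);
	printf("64-bit expiry: %lld\n", (long long)expires64);
	return 0;
}
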
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 91ea1aa..100b207 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
case RXKADDATALEN: return -EKEYREJECTED;
case RXKADILLEGALLEVEL: return -EKEYREJECTED;
+ case RXGEN_OPCODE: return -ENOTSUPP;
+
default: return -EREMOTEIO;
}
}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index d4fb0af..bd3b65c 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -47,59 +47,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
/*
- * check a symbolic link to see whether it actually encodes a mountpoint
- * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
- */
-int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
-{
- struct page *page;
- size_t size;
- char *buf;
- int ret;
-
- _enter("{%x:%u,%u}",
- vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
-
- /* read the contents of the symlink into the pagecache */
- page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
- afs_page_filler, key);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto out;
- }
-
- ret = -EIO;
- if (PageError(page))
- goto out_free;
-
- buf = kmap(page);
-
- /* examine the symlink's contents */
- size = vnode->status.size;
- _debug("symlink to %*.*s", (int) size, (int) size, buf);
-
- if (size > 2 &&
- (buf[0] == '%' || buf[0] == '#') &&
- buf[size - 1] == '.'
- ) {
- _debug("symlink is a mountpoint");
- spin_lock(&vnode->lock);
- set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
- spin_unlock(&vnode->lock);
- }
-
- ret = 0;
-
- kunmap(page);
-out_free:
- put_page(page);
-out:
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
* no valid lookup procedure on this sort of dir
*/
static struct dentry *afs_mntpt_lookup(struct inode *dir,
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 419ef05..8f76b13 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call)
call->buffer = NULL;
}
+#define AFS_BVEC_MAX 8
+
+/*
+ * Load the given bvec with the next few pages.
+ */
+static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
+ struct bio_vec *bv, pgoff_t first, pgoff_t last,
+ unsigned offset)
+{
+ struct page *pages[AFS_BVEC_MAX];
+ unsigned int nr, n, i, to, bytes = 0;
+
+ nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
+ n = find_get_pages_contig(call->mapping, first, nr, pages);
+ ASSERTCMP(n, ==, nr);
+
+ msg->msg_flags |= MSG_MORE;
+ for (i = 0; i < nr; i++) {
+ to = PAGE_SIZE;
+ if (first + i >= last) {
+ to = call->last_to;
+ msg->msg_flags &= ~MSG_MORE;
+ }
+ bv[i].bv_page = pages[i];
+ bv[i].bv_len = to - offset;
+ bv[i].bv_offset = offset;
+ bytes += to - offset;
+ offset = 0;
+ }
+
+ iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+}
+
/*
* attach the data from a bunch of pages on an inode to a call
*/
static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
{
- struct page *pages[8];
- unsigned count, n, loop, offset, to;
+ struct bio_vec bv[AFS_BVEC_MAX];
+ unsigned int bytes, nr, loop, offset;
pgoff_t first = call->first, last = call->last;
int ret;
- _enter("");
-
offset = call->first_offset;
call->first_offset = 0;
do {
- _debug("attach %lx-%lx", first, last);
+ afs_load_bvec(call, msg, bv, first, last, offset);
+ offset = 0;
+ bytes = msg->msg_iter.count;
+ nr = msg->msg_iter.nr_segs;
- count = last - first + 1;
- if (count > ARRAY_SIZE(pages))
- count = ARRAY_SIZE(pages);
- n = find_get_pages_contig(call->mapping, first, count, pages);
- ASSERTCMP(n, ==, count);
-
- loop = 0;
- do {
- struct bio_vec bvec = {.bv_page = pages[loop],
- .bv_offset = offset};
- msg->msg_flags = 0;
- to = PAGE_SIZE;
- if (first + loop >= last)
- to = call->last_to;
- else
- msg->msg_flags = MSG_MORE;
- bvec.bv_len = to - offset;
- offset = 0;
-
- _debug("- range %u-%u%s",
- offset, to, msg->msg_flags ? " [more]" : "");
- iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
- &bvec, 1, to - offset);
-
- /* have to change the state *before* sending the last
- * packet as RxRPC might give us the reply before it
- * returns from sending the request */
- if (first + loop >= last)
- call->state = AFS_CALL_AWAIT_REPLY;
- ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
- msg, to - offset);
- if (ret < 0)
- break;
- } while (++loop < count);
- first += count;
-
- for (loop = 0; loop < count; loop++)
- put_page(pages[loop]);
+ /* Have to change the state *before* sending the last
+ * packet as RxRPC might give us the reply before it
+ * returns from sending the request.
+ */
+ if (first + nr - 1 >= last)
+ call->state = AFS_CALL_AWAIT_REPLY;
+ ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
+ msg, bytes);
+ for (loop = 0; loop < nr; loop++)
+ put_page(bv[loop].bv_page);
if (ret < 0)
break;
+
+ first += nr;
} while (first <= last);
- _leave(" = %d", ret);
return ret;
}
@@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
+ size_t offset;
+ u32 abort_code;
int ret;
_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
@@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
msg.msg_controllen = 0;
msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
- /* have to change the state *before* sending the last packet as RxRPC
- * might give us the reply before it returns from sending the
- * request */
+ /* We have to change the state *before* sending the last packet as
+ * rxrpc might give us the reply before it returns from sending the
+ * request. Further, if the send fails, we may already have been given
+ * a notification and may have collected it.
+ */
if (!call->send_pages)
call->state = AFS_CALL_AWAIT_REPLY;
ret = rxrpc_kernel_send_data(afs_socket, rxcall,
@@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
return afs_wait_for_call_to_complete(call);
error_do_abort:
- rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
+ call->state = AFS_CALL_COMPLETE;
+ if (ret != -ECONNABORTED) {
+ rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
+ -ret, "KSD");
+ } else {
+ abort_code = 0;
+ offset = 0;
+ rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
+ false, &abort_code);
+ ret = call->type->abort_to_error(abort_code);
+ }
error_kill_call:
afs_put_call(call);
_leave(" = %d", ret);
@@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call)
case -EINPROGRESS:
case -EAGAIN:
goto out;
+ case -ECONNABORTED:
+ goto call_complete;
case -ENOTCONN:
abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, -ret, "KNC");
- goto do_abort;
+ goto save_error;
case -ENOTSUPP:
- abort_code = RX_INVALID_OPERATION;
+ abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, -ret, "KIV");
- goto do_abort;
+ goto save_error;
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
@@ -471,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, EBADMSG, "KUM");
- goto do_abort;
+ goto save_error;
}
}
@@ -482,8 +505,9 @@ static void afs_deliver_to_call(struct afs_call *call)
_leave("");
return;
-do_abort:
+save_error:
call->error = ret;
+call_complete:
call->state = AFS_CALL_COMPLETE;
goto done;
}
@@ -493,7 +517,6 @@ static void afs_deliver_to_call(struct afs_call *call)
*/
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
- const char *abort_why;
int ret;
DECLARE_WAITQUEUE(myself, current);
@@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
continue;
}
- abort_why = "KWC";
- ret = call->error;
- if (call->state == AFS_CALL_COMPLETE)
- break;
- abort_why = "KWI";
- ret = -EINTR;
- if (signal_pending(current))
+ if (call->state == AFS_CALL_COMPLETE ||
+ signal_pending(current))
break;
schedule();
}
@@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
- /* kill the call */
+ /* Kill off the call if it's still live. */
if (call->state < AFS_CALL_COMPLETE) {
- _debug("call incomplete");
+ _debug("call interrupted");
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- RX_CALL_DEAD, -ret, abort_why);
+ RX_USER_ABORT, -EINTR, "KWI");
}
+ ret = call->error;
_debug("call complete");
afs_put_call(call);
_leave(" = %d", ret);
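
Note on the rxrpc.c changes above: afs_send_pages() now batches up to AFS_BVEC_MAX pages into one bio_vec array and issues a single rxrpc_kernel_send_data() per batch instead of one call per page. The closest user-space analogue of that gather-and-send pattern is writev(); a minimal sketch (the buffers and names are illustrative, not taken from the AFS code):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define BATCH_MAX 8

int main(void)
{
	const char *chunks[] = { "first ", "second ", "third\n" };
	struct iovec iov[BATCH_MAX];
	int n = 3;

	for (int i = 0; i < n; i++) {
		iov[i].iov_base = (void *)chunks[i];
		iov[i].iov_len = strlen(chunks[i]);
	}
	/* One system call pushes the whole batch, like one bio_vec-backed send. */
	ssize_t sent = writev(STDOUT_FILENO, iov, n);

	return sent < 0 ? 1 : 0;
}
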
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 8d01042..ecb86a6 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
- rcu_assign_pointer(vnode->permits, NULL);
+ RCU_INIT_POINTER(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
} else {
if (!(access & AFS_ACE_LOOKUP))
goto permission_denied;
+ if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
+ goto permission_denied;
if (mask & (MAY_EXEC | MAY_READ)) {
if (!(access & AFS_ACE_READ))
goto permission_denied;
+ if (!(inode->i_mode & S_IRUSR))
+ goto permission_denied;
} else if (mask & MAY_WRITE) {
if (!(access & AFS_ACE_WRITE))
goto permission_denied;
+ if (!(inode->i_mode & S_IWUSR))
+ goto permission_denied;
}
}
key_put(key);
- ret = generic_permission(inode, mask);
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d4066ab..c001b1f2 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
spin_lock(&afs_server_graveyard_lock);
if (atomic_read(&server->usage) == 0) {
list_move_tail(&server->grave, &afs_server_graveyard);
- server->time_of_death = get_seconds();
+ server->time_of_death = ktime_get_real_seconds();
queue_delayed_work(afs_wq, &afs_server_reaper,
afs_server_timeout * HZ);
}
@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
LIST_HEAD(corpses);
struct afs_server *server;
unsigned long delay, expiry;
- time_t now;
+ time64_t now;
- now = get_seconds();
+ now = ktime_get_real_seconds();
spin_lock(&afs_server_graveyard_lock);
while (!list_empty(&afs_server_graveyard)) {
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index d7d8dd8..37b7c3b 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
struct afs_vlocation *xvl;
/* wait at least 10 minutes before updating... */
- vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+ vl->update_at = ktime_get_real_seconds() +
+ afs_vlocation_update_timeout;
spin_lock(&afs_vlocation_updates_lock);
@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
if (atomic_read(&vl->usage) == 0) {
_debug("buried");
list_move_tail(&vl->grave, &afs_vlocation_graveyard);
- vl->time_of_death = get_seconds();
+ vl->time_of_death = ktime_get_real_seconds();
queue_delayed_work(afs_wq, &afs_vlocation_reap,
afs_vlocation_timeout * HZ);
@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
LIST_HEAD(corpses);
struct afs_vlocation *vl;
unsigned long delay, expiry;
- time_t now;
+ time64_t now;
_enter("");
- now = get_seconds();
+ now = ktime_get_real_seconds();
spin_lock(&afs_vlocation_graveyard_lock);
while (!list_empty(&afs_vlocation_graveyard)) {
@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
{
struct afs_cache_vlocation vldb;
struct afs_vlocation *vl, *xvl;
- time_t now;
+ time64_t now;
long timeout;
int ret;
_enter("");
- now = get_seconds();
+ now = ktime_get_real_seconds();
/* find a record to update */
spin_lock(&afs_vlocation_updates_lock);
@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
/* and then reschedule */
_debug("reschedule");
- vl->update_at = get_seconds() + afs_vlocation_update_timeout;
+ vl->update_at = ktime_get_real_seconds() +
+ afs_vlocation_update_timeout;
spin_lock(&afs_vlocation_updates_lock);
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c83c1a0..2d2fccd 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
* partly or wholly fill a page that's under preparation for writing
*/
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
- loff_t pos, struct page *page)
+ loff_t pos, unsigned int len, struct page *page)
{
struct afs_read *req;
- loff_t i_size;
int ret;
_enter(",,%llu", (unsigned long long)pos);
@@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
atomic_set(&req->usage, 1);
req->pos = pos;
+ req->len = len;
req->nr_pages = 1;
req->pages[0] = page;
-
- i_size = i_size_read(&vnode->vfs_inode);
- if (pos + PAGE_SIZE > i_size)
- req->len = i_size - pos;
- else
- req->len = PAGE_SIZE;
+ get_page(page);
ret = afs_vnode_fetch_data(vnode, key, req);
afs_put_read(req);
@@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
kfree(candidate);
return -ENOMEM;
}
- *pagep = page;
- /* page won't leak in error case: it eventually gets cleaned off LRU */
if (!PageUptodate(page) && len != PAGE_SIZE) {
- ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+ ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
kfree(candidate);
_leave(" = %d [prep]", ret);
return ret;
@@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
SetPageUptodate(page);
}
+ /* page won't leak in error case: it eventually gets cleaned off LRU */
+ *pagep = page;
+
try_again:
spin_lock(&vnode->writeback_lock);
@@ -233,7 +231,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
if (wb->state == AFS_WBACK_PENDING)
wb->state = AFS_WBACK_CONFLICTING;
spin_unlock(&vnode->writeback_lock);
- if (PageDirty(page)) {
+ if (clear_page_dirty_for_io(page)) {
ret = afs_write_back_from_locked_page(wb, page);
if (ret < 0) {
afs_put_writeback(candidate);
@@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct key *key = file->private_data;
loff_t i_size, maybe_i_size;
+ int ret;
_enter("{%x:%u},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
spin_unlock(&vnode->writeback_lock);
}
+ if (!PageUptodate(page)) {
+ if (copied < len) {
+ /* Try and load any missing data from the server. The
+ * unmarshalling routine will take care of clearing any
+ * bits that are beyond the EOF.
+ */
+ ret = afs_fill_page(vnode, key, pos + copied,
+ len - copied, page);
+ if (ret < 0)
+ return ret;
+ }
+ SetPageUptodate(page);
+ }
+
set_page_dirty(page);
if (PageDirty(page))
_debug("dirtied");
@@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
ASSERTCMP(pv.nr, ==, count);
for (loop = 0; loop < count; loop++) {
- ClearPageUptodate(pv.pages[loop]);
+ struct page *page = pv.pages[loop];
+ ClearPageUptodate(page);
if (error)
- SetPageError(pv.pages[loop]);
- end_page_writeback(pv.pages[loop]);
+ SetPageError(page);
+ if (PageWriteback(page))
+ end_page_writeback(page);
+ if (page->index >= first)
+ first = page->index + 1;
}
__pagevec_release(&pv);
@@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
_enter(",%lx", primary_page->index);
count = 1;
- if (!clear_page_dirty_for_io(primary_page))
- BUG();
if (test_set_page_writeback(primary_page))
BUG();
@@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping,
*/
lock_page(page);
- if (page->mapping != mapping) {
+ if (page->mapping != mapping || !PageDirty(page)) {
unlock_page(page);
put_page(page);
continue;
}
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) || !PageDirty(page)) {
+ if (PageWriteback(page)) {
unlock_page(page);
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+ put_page(page);
continue;
}
@@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping,
wb->state = AFS_WBACK_WRITING;
spin_unlock(&wb->vnode->writeback_lock);
+ if (!clear_page_dirty_for_io(page))
+ BUG();
ret = afs_write_back_from_locked_page(wb, page);
unlock_page(page);
put_page(page);
@@ -746,6 +764,20 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
/*
+ * Flush out all outstanding writes on a file opened for writing when it is
+ * closed.
+ */
+int afs_flush(struct file *file, fl_owner_t id)
+{
+ _enter("");
+
+ if ((file->f_mode & FMODE_WRITE) == 0)
+ return 0;
+
+ return vfs_fsync(file, 0);
+}
+
+/*
* notification that a previously read-only page is about to become writable
* - if it returns an error, the caller will deliver a bus error signal
*/
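
Note on the new afs_flush() hook above: close() on a file opened for writing now runs vfs_fsync(), so write-back errors reach the closing process instead of being lost to background writeback. The same policy expressed as a user-space helper (the function name is illustrative):

#include <fcntl.h>
#include <unistd.h>

/* Sync only if the descriptor was opened with write access, then close. */
static int close_with_flush(int fd)
{
	int flags = fcntl(fd, F_GETFL);
	int ret = 0;

	if (flags >= 0 && (flags & O_ACCMODE) != O_RDONLY)
		ret = fsync(fd);
	if (close(fd) < 0)
		ret = -1;
	return ret;
}
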
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 28e8192..8df7974 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping,
* can we find nothing at @index.
*/
ASSERT(page_ops & PAGE_LOCK);
- return ret;
+ err = -EAGAIN;
+ goto out;
}
for (i = 0; i < ret; i++) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c40060c..2315039 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
max_size = min_t(unsigned long, PAGE_SIZE, max_size);
ret = btrfs_decompress(compress_type, tmp, page,
extent_offset, inline_size, max_size);
+
+ /*
+ * decompression code contains a memset to fill in any space between the end
+ * of the uncompressed data and the end of max_size in case the decompressed
+ * data ends up shorter than ram_bytes. That doesn't cover the hole between
+ * the end of an inline extent and the beginning of the next block, so we
+ * cover that region here.
+ */
+
+ if (max_size + pg_offset < PAGE_SIZE) {
+ char *map = kmap(page);
+ memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
+ kunmap(page);
+ }
kfree(tmp);
return ret;
}
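
Note on the btrfs hunk above: it zeroes the region between the end of the decompressed inline extent and the end of the page, which the decompressor's own memset does not reach. A compact model of that tail-clearing step (PAGE_SIZE and the helper name are illustrative):

#include <string.h>

#define PAGE_SIZE 4096u

/* Zero everything in the page beyond pg_offset + data_len. */
static void zero_tail(char *page, size_t pg_offset, size_t data_len)
{
	size_t end = pg_offset + data_len;

	if (end < PAGE_SIZE)
		memset(page + end, 0, PAGE_SIZE - end);
}
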
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 02a7a92..6d6eca3 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page);
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *dir;
- struct fscrypt_info *ci;
int dir_has_key, cached_with_key;
if (flags & LOOKUP_RCU)
@@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
return 0;
}
- ci = d_inode(dir)->i_crypt_info;
- if (ci && ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD))))
- ci = NULL;
-
/* this should eventually be an flag in d_flags */
spin_lock(&dentry->d_lock);
cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
spin_unlock(&dentry->d_lock);
- dir_has_key = (ci != NULL);
+ dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
dput(dir);
/*
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 13052b8..37b4989 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
fname->disk_name.len = iname->len;
return 0;
}
- ret = fscrypt_get_crypt_info(dir);
+ ret = fscrypt_get_encryption_info(dir);
if (ret && ret != -EOPNOTSUPP)
return ret;
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index fdbb8af..e39696e 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -67,7 +67,6 @@ struct fscrypt_info {
u8 ci_filename_mode;
u8 ci_flags;
struct crypto_skcipher *ci_ctfm;
- struct key *ci_keyring_key;
u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
};
@@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode,
extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
gfp_t gfp_flags);
-/* keyinfo.c */
-extern int fscrypt_get_crypt_info(struct inode *);
-
#endif /* _FSCRYPT_PRIVATE_H */
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index d5d896fa..8cdfddc 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
kfree(description);
if (IS_ERR(keyring_key))
return PTR_ERR(keyring_key);
+ down_read(&keyring_key->sem);
if (keyring_key->type != &key_type_logon) {
printk_once(KERN_WARNING
@@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
res = -ENOKEY;
goto out;
}
- down_read(&keyring_key->sem);
ukp = user_key_payload_locked(keyring_key);
if (ukp->datalen != sizeof(struct fscrypt_key)) {
res = -EINVAL;
- up_read(&keyring_key->sem);
goto out;
}
master_key = (struct fscrypt_key *)ukp->data;
@@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
"%s: key size incorrect: %d\n",
__func__, master_key->size);
res = -ENOKEY;
- up_read(&keyring_key->sem);
goto out;
}
res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
- up_read(&keyring_key->sem);
- if (res)
- goto out;
-
- crypt_info->ci_keyring_key = keyring_key;
- return 0;
out:
+ up_read(&keyring_key->sem);
key_put(keyring_key);
return res;
}
@@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (!ci)
return;
- key_put(ci->ci_keyring_key);
crypto_free_skcipher(ci->ci_ctfm);
kmem_cache_free(fscrypt_info_cachep, ci);
}
-int fscrypt_get_crypt_info(struct inode *inode)
+int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
struct fscrypt_context ctx;
@@ -184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode)
u8 *raw_key = NULL;
int res;
+ if (inode->i_crypt_info)
+ return 0;
+
res = fscrypt_initialize(inode->i_sb->s_cop->flags);
if (res)
return res;
if (!inode->i_sb->s_cop->get_context)
return -EOPNOTSUPP;
-retry:
- crypt_info = ACCESS_ONCE(inode->i_crypt_info);
- if (crypt_info) {
- if (!crypt_info->ci_keyring_key ||
- key_validate(crypt_info->ci_keyring_key) == 0)
- return 0;
- fscrypt_put_encryption_info(inode, crypt_info);
- goto retry;
- }
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (res < 0) {
@@ -229,7 +215,6 @@ int fscrypt_get_crypt_info(struct inode *inode)
crypt_info->ci_data_mode = ctx.contents_encryption_mode;
crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
crypt_info->ci_ctfm = NULL;
- crypt_info->ci_keyring_key = NULL;
memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
sizeof(crypt_info->ci_master_key));
@@ -273,14 +258,8 @@ int fscrypt_get_crypt_info(struct inode *inode)
if (res)
goto out;
- kzfree(raw_key);
- raw_key = NULL;
- if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
- put_crypt_info(crypt_info);
- goto retry;
- }
- return 0;
-
+ if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+ crypt_info = NULL;
out:
if (res == -ENOKEY)
res = 0;
@@ -288,6 +267,7 @@ int fscrypt_get_crypt_info(struct inode *inode)
kzfree(raw_key);
return res;
}
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
{
@@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
put_crypt_info(ci);
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);
-
-int fscrypt_get_encryption_info(struct inode *inode)
-{
- struct fscrypt_info *ci = inode->i_crypt_info;
-
- if (!ci ||
- (ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD)))))
- return fscrypt_get_crypt_info(inode);
- return 0;
-}
-EXPORT_SYMBOL(fscrypt_get_encryption_info);
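
Note on the keyinfo.c changes above: fscrypt_get_encryption_info() now publishes the newly built fscrypt_info with a single cmpxchg(); the losing thread frees its copy and proceeds with the pointer that won, with no retry loop and no keyring key held. The same install-once idiom with C11 atomics (generic types, not the fscrypt structures):

#include <stdatomic.h>
#include <stdlib.h>

struct info { int key; };

static _Atomic(struct info *) installed;

/* Return the installed info, building one if nobody has yet. */
static struct info *get_info(void)
{
	struct info *ci = atomic_load(&installed);

	if (ci)
		return ci;

	ci = calloc(1, sizeof(*ci));
	if (!ci)
		return NULL;

	struct info *expected = NULL;

	if (!atomic_compare_exchange_strong(&installed, &expected, ci)) {
		free(ci);		/* lost the race; use the winner's copy */
		ci = expected;
	}
	return ci;
}
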
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 14b76da..4908906 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode,
const struct fscrypt_policy *policy)
{
struct fscrypt_context ctx;
- int res;
if (!inode->i_sb->s_cop->set_context)
return -EOPNOTSUPP;
- if (inode->i_sb->s_cop->prepare_context) {
- res = inode->i_sb->s_cop->prepare_context(inode);
- if (res)
- return res;
- }
-
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 30a9f21..375fb1c 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
set_buffer_uptodate(dir_block);
err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
if (err)
- goto out;
+ return err;
set_buffer_verified(dir_block);
-out:
- return err;
+ return ext4_mark_inode_dirty(handle, inode);
}
static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7385e6a..4247d8d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5400,7 +5400,7 @@ int ext4_getattr(const struct path *path, struct kstat *stat,
* If there is inline data in the inode, the inode will normally not
* have data blocks allocated (it may have an external xattr block).
* Report at least one sector for such files, so tools like tar, rsync,
- * others doen't incorrectly think the file is completely sparse.
+ * others don't incorrectly think the file is completely sparse.
*/
if (unlikely(ext4_has_inline_data(inode)))
stat->blocks += (stat->size + 511) >> 9;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 578f8c3..c992ef2 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode,
if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
(donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
ext4_debug("ext4 move extent: orig and donor's start "
- "offset are not alligned [ino:orig %lu, donor %lu]\n",
+ "offsets are not aligned [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2e03a0a..a9448db 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}
-static int ext4_prepare_context(struct inode *inode)
-{
- return ext4_convert_inline_data(inode);
-}
-
static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
handle_t *handle = fs_data;
int res, res2, retries = 0;
+ res = ext4_convert_inline_data(inode);
+ if (res)
+ return res;
+
/*
* If a journal handle was specified, then the encryption context is
* being set on a new inode via inheritance and is part of a larger
@@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode)
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
- .prepare_context = ext4_prepare_context,
.set_context = ext4_set_context,
.dummy_context = ext4_dummy_context,
.is_encrypted = ext4_encrypted_inode,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 67636ac..996e790 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
}
static int ext4_xattr_block_csum_verify(struct inode *inode,
- sector_t block_nr,
- struct ext4_xattr_header *hdr)
+ struct buffer_head *bh)
{
- if (ext4_has_metadata_csum(inode->i_sb) &&
- (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
- return 0;
- return 1;
+ struct ext4_xattr_header *hdr = BHDR(bh);
+ int ret = 1;
+
+ if (ext4_has_metadata_csum(inode->i_sb)) {
+ lock_buffer(bh);
+ ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+ bh->b_blocknr, hdr));
+ unlock_buffer(bh);
+ }
+ return ret;
}
static void ext4_xattr_block_csum_set(struct inode *inode,
- sector_t block_nr,
- struct ext4_xattr_header *hdr)
+ struct buffer_head *bh)
{
- if (!ext4_has_metadata_csum(inode->i_sb))
- return;
-
- hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
-}
-
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
- struct inode *inode,
- struct buffer_head *bh)
-{
- ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
- return ext4_handle_dirty_metadata(handle, inode, bh);
+ if (ext4_has_metadata_csum(inode->i_sb))
+ BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+ bh->b_blocknr, BHDR(bh));
}
static inline const struct xattr_handler *
@@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1))
return -EFSCORRUPTED;
- if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+ if (!ext4_xattr_block_csum_verify(inode, bh))
return -EFSBADCRC;
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
bh->b_data);
@@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
}
}
+ ext4_xattr_block_csum_set(inode, bh);
/*
* Beware of this ugliness: Releasing of xattr block references
* from different inodes can race and so we have to protect
* from a race where someone else frees the block (and releases
* its journal_head) before we are done dirtying the buffer. In
* nojournal mode this race is harmless and we actually cannot
- * call ext4_handle_dirty_xattr_block() with locked buffer as
+ * call ext4_handle_dirty_metadata() with locked buffer as
* that function can call sync_dirty_buffer() so for that case
* we handle the dirtying after unlocking the buffer.
*/
if (ext4_handle_valid(handle))
- error = ext4_handle_dirty_xattr_block(handle, inode,
- bh);
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
unlock_buffer(bh);
if (!ext4_handle_valid(handle))
- error = ext4_handle_dirty_xattr_block(handle, inode,
- bh);
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
ext4_xattr_cache_insert(ext4_mb_cache,
bs->bh);
}
+ ext4_xattr_block_csum_set(inode, bs->bh);
unlock_buffer(bs->bh);
if (error == -EFSCORRUPTED)
goto bad_block;
if (!error)
- error = ext4_handle_dirty_xattr_block(handle,
- inode,
- bs->bh);
+ error = ext4_handle_dirty_metadata(handle,
+ inode,
+ bs->bh);
if (error)
goto cleanup;
goto inserted;
@@ -967,10 +962,11 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
ce->e_reusable = 0;
ea_bdebug(new_bh, "reusing; refcount now=%d",
ref);
+ ext4_xattr_block_csum_set(inode, new_bh);
unlock_buffer(new_bh);
- error = ext4_handle_dirty_xattr_block(handle,
- inode,
- new_bh);
+ error = ext4_handle_dirty_metadata(handle,
+ inode,
+ new_bh);
if (error)
goto cleanup_dquot;
}
@@ -1020,11 +1016,12 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
goto getblk_failed;
}
memcpy(new_bh->b_data, s->base, new_bh->b_size);
+ ext4_xattr_block_csum_set(inode, new_bh);
set_buffer_uptodate(new_bh);
unlock_buffer(new_bh);
ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
- error = ext4_handle_dirty_xattr_block(handle,
- inode, new_bh);
+ error = ext4_handle_dirty_metadata(handle, inode,
+ new_bh);
if (error)
goto cleanup;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index a77df37..ee2d0a48 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
si->base_mem += NM_I(sbi)->nat_blocks / 8;
+ si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
get_cache:
si->cache_mem = 0;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4650c9b..8d5c62b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
dentry_blk = page_address(page);
bit_pos = dentry - dentry_blk->dentry;
for (i = 0; i < slots; i++)
- clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+ __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
/* Let's check and deallocate this dentry page */
bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e849f83..0a6e115 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -561,6 +561,8 @@ struct f2fs_nm_info {
struct mutex build_lock; /* lock for build free nids */
unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
unsigned char *nat_block_bitmap;
+ unsigned short *free_nid_count; /* free nid count of NAT block */
+ spinlock_t free_nid_lock; /* protect updating of nid count */
/* for checkpoint */
char *nat_bitmap; /* NAT bitmap pointer */
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9496717..481aa8d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
set_nat_flag(e, IS_CHECKPOINTED, false);
__set_nat_cache_dirty(nm_i, e);
- if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR)
- clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits);
-
/* update fsync_mark if its inode nat entry is still alive */
if (ni->nid != ni->ino)
e = __lookup_nat_cache(nm_i, ni->ino);
@@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
-void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+ bool set, bool build, bool locked)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
@@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set)
return;
if (set)
- set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
else
- clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+
+ if (!locked)
+ spin_lock(&nm_i->free_nid_lock);
+ if (set)
+ nm_i->free_nid_count[nat_ofs]++;
+ else if (!build)
+ nm_i->free_nid_count[nat_ofs]--;
+ if (!locked)
+ spin_unlock(&nm_i->free_nid_lock);
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
- set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
+ if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+ return;
+
+ __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
i = start_nid % NAT_ENTRY_PER_BLOCK;
@@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
if (blk_addr == NULL_ADDR)
freed = add_free_nid(sbi, start_nid, true);
- update_free_nid_bitmap(sbi, start_nid, freed);
+ update_free_nid_bitmap(sbi, start_nid, freed, true, false);
}
}
@@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
for (i = 0; i < nm_i->nat_blocks; i++) {
if (!test_bit_le(i, nm_i->nat_block_bitmap))
continue;
+ if (!nm_i->free_nid_count[i])
+ continue;
for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
nid_t nid;
@@ -1907,58 +1919,6 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
up_read(&nm_i->nat_tree_lock);
}
-static int scan_nat_bits(struct f2fs_sb_info *sbi)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct page *page;
- unsigned int i = 0;
- nid_t nid;
-
- if (!enabled_nat_bits(sbi, NULL))
- return -EAGAIN;
-
- down_read(&nm_i->nat_tree_lock);
-check_empty:
- i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
- if (i >= nm_i->nat_blocks) {
- i = 0;
- goto check_partial;
- }
-
- for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK;
- nid++) {
- if (unlikely(nid >= nm_i->max_nid))
- break;
- add_free_nid(sbi, nid, true);
- }
-
- if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
- goto out;
- i++;
- goto check_empty;
-
-check_partial:
- i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
- if (i >= nm_i->nat_blocks) {
- disable_nat_bits(sbi, true);
- up_read(&nm_i->nat_tree_lock);
- return -EINVAL;
- }
-
- nid = i * NAT_ENTRY_PER_BLOCK;
- page = get_current_nat_page(sbi, nid);
- scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
-
- if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) {
- i++;
- goto check_partial;
- }
-out:
- up_read(&nm_i->nat_tree_lock);
- return 0;
-}
-
static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
if (nm_i->nid_cnt[FREE_NID_LIST])
return;
-
- /* try to find free nids with nat_bits */
- if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST])
- return;
- }
-
- /* find next valid candidate */
- if (enabled_nat_bits(sbi, NULL)) {
- int idx = find_next_zero_bit_le(nm_i->full_nat_bits,
- nm_i->nat_blocks, 0);
-
- if (idx >= nm_i->nat_blocks)
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- else
- nid = idx * NAT_ENTRY_PER_BLOCK;
}
/* readahead nat pages to be scanned */
@@ -2081,7 +2026,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
nm_i->available_nids--;
- update_free_nid_bitmap(sbi, *nid, false);
+ update_free_nid_bitmap(sbi, *nid, false, false, false);
spin_unlock(&nm_i->nid_list_lock);
return true;
@@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
nm_i->available_nids++;
- update_free_nid_bitmap(sbi, nid, true);
+ update_free_nid_bitmap(sbi, nid, true, false, false);
spin_unlock(&nm_i->nid_list_lock);
@@ -2383,7 +2328,7 @@ static void __adjust_nat_entry_set(struct nat_entry_set *nes,
list_add_tail(&nes->set_list, head);
}
-void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
+static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
struct page *page)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
valid++;
}
if (valid == 0) {
- set_bit_le(nat_index, nm_i->empty_nat_bits);
- clear_bit_le(nat_index, nm_i->full_nat_bits);
+ __set_bit_le(nat_index, nm_i->empty_nat_bits);
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
return;
}
- clear_bit_le(nat_index, nm_i->empty_nat_bits);
+ __clear_bit_le(nat_index, nm_i->empty_nat_bits);
if (valid == NAT_ENTRY_PER_BLOCK)
- set_bit_le(nat_index, nm_i->full_nat_bits);
+ __set_bit_le(nat_index, nm_i->full_nat_bits);
else
- clear_bit_le(nat_index, nm_i->full_nat_bits);
+ __clear_bit_le(nat_index, nm_i->full_nat_bits);
}
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
add_free_nid(sbi, nid, false);
spin_lock(&NM_I(sbi)->nid_list_lock);
NM_I(sbi)->available_nids++;
- update_free_nid_bitmap(sbi, nid, true);
+ update_free_nid_bitmap(sbi, nid, true, false, false);
spin_unlock(&NM_I(sbi)->nid_list_lock);
} else {
spin_lock(&NM_I(sbi)->nid_list_lock);
- update_free_nid_bitmap(sbi, nid, false);
+ update_free_nid_bitmap(sbi, nid, false, false, false);
spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
@@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
return 0;
}
+inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int i = 0;
+ nid_t nid, last_nid;
+
+ if (!enabled_nat_bits(sbi, NULL))
+ return;
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
+ if (i >= nm_i->nat_blocks)
+ break;
+
+ __set_bit_le(i, nm_i->nat_block_bitmap);
+
+ nid = i * NAT_ENTRY_PER_BLOCK;
+ last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
+
+ spin_lock(&nm_i->free_nid_lock);
+ for (; nid < last_nid; nid++)
+ update_free_nid_bitmap(sbi, nid, true, true, true);
+ spin_unlock(&nm_i->free_nid_lock);
+ }
+
+ for (i = 0; i < nm_i->nat_blocks; i++) {
+ i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
+ if (i >= nm_i->nat_blocks)
+ break;
+
+ __set_bit_le(i, nm_i->nat_block_bitmap);
+ }
+}
+
static int init_node_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
@@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
return 0;
}
-int init_free_nid_cache(struct f2fs_sb_info *sbi)
+static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi)
GFP_KERNEL);
if (!nm_i->nat_block_bitmap)
return -ENOMEM;
+
+ nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
+ sizeof(unsigned short), GFP_KERNEL);
+ if (!nm_i->free_nid_count)
+ return -ENOMEM;
+
+ spin_lock_init(&nm_i->free_nid_lock);
+
return 0;
}
@@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi)
if (err)
return err;
+ /* load free nid status from nat_bits table */
+ load_free_nid_bitmap(sbi);
+
build_free_nids(sbi, true, true);
return 0;
}
@@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
kvfree(nm_i->nat_block_bitmap);
kvfree(nm_i->free_nid_bitmap);
+ kvfree(nm_i->free_nid_count);
kfree(nm_i->nat_bitmap);
kfree(nm_i->nat_bits);
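
Note on the f2fs node.c changes above: a per-NAT-block free-nid counter (free_nid_count, guarded by free_nid_lock) lets scan_free_nid_bits() skip blocks that have no free nids before touching their bitmaps. A rough user-space model of that bookkeeping, with a pthread mutex standing in for the spinlock and the build/locked subtleties omitted:

#include <pthread.h>
#include <stdbool.h>

#define NAT_BLOCKS 16

static unsigned short free_nid_count[NAT_BLOCKS];
static pthread_mutex_t free_nid_lock = PTHREAD_MUTEX_INITIALIZER;

static void mark_nid(unsigned int block, bool set, bool already_locked)
{
	if (!already_locked)
		pthread_mutex_lock(&free_nid_lock);
	if (set)
		free_nid_count[block]++;
	else if (free_nid_count[block] > 0)
		free_nid_count[block]--;
	if (!already_locked)
		pthread_mutex_unlock(&free_nid_lock);
}

/* Unlocked read: only a hint for scanners to skip obviously empty blocks. */
static bool block_worth_scanning(unsigned int block)
{
	return free_nid_count[block] != 0;
}
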
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 4bd7a8b..29ef708 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
if (f2fs_discard_en(sbi) &&
!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
+
+ /* don't overwrite by SSR to keep node chain */
+ if (se->type == CURSEG_WARM_NODE) {
+ if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
+ se->ckpt_valid_blocks++;
+ }
} else {
if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a1a359b..5adc2fb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
/* Set up a default-sized revoke table for the new mount. */
err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
- if (err) {
- kfree(journal);
- return NULL;
- }
+ if (err)
+ goto err_cleanup;
spin_lock_init(&journal->j_history_lock);
@@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev,
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
GFP_KERNEL);
- if (!journal->j_wbuf) {
- kfree(journal);
- return NULL;
- }
+ if (!journal->j_wbuf)
+ goto err_cleanup;
bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
if (!bh) {
pr_err("%s: Cannot get buffer for journal superblock\n",
__func__);
- kfree(journal->j_wbuf);
- kfree(journal);
- return NULL;
+ goto err_cleanup;
}
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
return journal;
+
+err_cleanup:
+ kfree(journal->j_wbuf);
+ jbd2_journal_destroy_revoke(journal);
+ kfree(journal);
+ return NULL;
}
/* jbd2_journal_init_dev and jbd2_journal_init_inode:
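
Note on the journal_init_common() change above: every failure path now funnels through one err_cleanup label that releases j_wbuf, the revoke tables and the journal itself, instead of unwinding by hand at each call site (the revoke.c hunk below clears j_revoke_table[0] on its own failure path so the later destroy does not free it twice). The general shape of that pattern as a stand-alone sketch:

#include <stdlib.h>

struct widget { void *a; void *b; };

static struct widget *widget_create(void)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;

	w->a = malloc(128);
	if (!w->a)
		goto err_cleanup;
	w->b = malloc(256);
	if (!w->b)
		goto err_cleanup;
	return w;

err_cleanup:
	/* Every teardown step must tolerate members that were never set up. */
	free(w->a);
	free(w->b);
	free(w);
	return NULL;
}
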
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index cfc38b5..f9aefcd 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
fail1:
jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+ journal->j_revoke_table[0] = NULL;
fail0:
return -ENOMEM;
}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 8e4dc7a..ac2dfe0 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
if (kn->flags & KERNFS_HAS_MMAP)
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
- kernfs_release_file(kn, of);
+ if (kn->flags & KERNFS_HAS_RELEASE)
+ kernfs_release_file(kn, of);
}
mutex_unlock(&kernfs_open_file_mutex);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index bb79972..7737745 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -232,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = {
.svo_module = THIS_MODULE,
};
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
[0] = &nfs40_cb_sv_ops,
[1] = &nfs41_cb_sv_ops,
};
#else
-struct svc_serv_ops *nfs4_cb_sv_ops[] = {
+static struct svc_serv_ops *nfs4_cb_sv_ops[] = {
[0] = &nfs40_cb_sv_ops,
[1] = NULL,
};
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 91a8d61..390ada8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
return NULL;
}
-static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+/*
+ * Return true if @clp is done initializing, false if still working on it.
+ *
+ * Use nfs_client_init_status to check if it was successful.
+ */
+bool nfs_client_init_is_complete(const struct nfs_client *clp)
{
return clp->cl_cons_state <= NFS_CS_READY;
}
+EXPORT_SYMBOL_GPL(nfs_client_init_is_complete);
+
+/*
+ * Return 0 if @clp was successfully initialized, -errno otherwise.
+ *
+ * This must be called *after* nfs_client_init_is_complete() returns true,
+ * otherwise it will trigger a WARN_ON_ONCE() and return -EINVAL.
+ */
+int nfs_client_init_status(const struct nfs_client *clp)
+{
+ /* called without checking nfs_client_init_is_complete */
+ if (clp->cl_cons_state > NFS_CS_READY) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ return clp->cl_cons_state;
+}
+EXPORT_SYMBOL_GPL(nfs_client_init_status);
int nfs_wait_client_init_complete(const struct nfs_client *clp)
{
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index f956ca2..d913e81 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs4_pnfs_ds *ret = ds;
struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
+ int status;
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
@@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
if (ds->ds_clp)
goto out_test_devid;
- nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
dataserver_retrans, 4,
s->nfs_client->cl_minorversion);
+ if (status) {
+ nfs4_mark_deviceid_unavailable(devid);
+ ret = NULL;
+ goto out;
+ }
out_test_devid:
if (ret->ds_clp == NULL ||
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index f4f39b0..98b34c9 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
static inline bool
ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
{
- return nfs4_test_deviceid_unavailable(node);
+ /*
+ * Flexfiles should never mark a DS unavailable, but if it does,
+ * print a (ratelimited) warning as this can affect performance.
+ */
+ if (nfs4_test_deviceid_unavailable(node)) {
+ u32 *p = (u32 *)node->deviceid.data;
+
+ pr_warn_ratelimited("NFS: flexfiles layout referencing an "
+ "unavailable device [%x%x%x%x]\n",
+ p[0], p[1], p[2], p[3]);
+ return true;
+ }
+ return false;
}
static inline int
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e5a6f24..85fde93 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -384,6 +384,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
struct inode *ino = lseg->pls_layout->plh_inode;
struct nfs_server *s = NFS_SERVER(ino);
unsigned int max_payload;
+ int status;
if (!ff_layout_mirror_valid(lseg, mirror, true)) {
pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -404,7 +405,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
/* FIXME: For now we assume the server sent only one version of NFS
* to use for the DS.
*/
- nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
+ status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
dataserver_retrans,
mirror->mirror_ds->ds_versions[0].version,
mirror->mirror_ds->ds_versions[0].minor_version);
@@ -420,11 +421,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
mirror->mirror_ds->ds_versions[0].wsize = max_payload;
goto out;
}
+out_fail:
ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
mirror, lseg->pls_range.offset,
lseg->pls_range.length, NFS4ERR_NXIO,
OP_ILLEGAL, GFP_NOIO);
-out_fail:
if (fail_return || !ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(ino, lseg);
ds = NULL;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 09ca5095..7b38fed 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *,
rpc_authflavor_t);
+extern bool nfs_client_init_is_complete(const struct nfs_client *clp);
+extern int nfs_client_init_status(const struct nfs_client *clp);
extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 5ae9d64..8346ccb 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
- if (server->rsize > server_resp_sz)
+ if (!server->rsize || server->rsize > server_resp_sz)
server->rsize = server_resp_sz;
- if (server->wsize > server_rqst_sz)
+ if (!server->wsize || server->wsize > server_rqst_sz)
server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1b18368..c780d98 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
return 0;
- /* even though OPEN succeeded, access is denied. Close the file */
- nfs4_close_state(state, fmode);
return -EACCES;
}
@@ -7427,11 +7425,11 @@ static void nfs4_exchange_id_release(void *data)
struct nfs41_exchange_id_data *cdata =
(struct nfs41_exchange_id_data *)data;
- nfs_put_client(cdata->args.client);
if (cdata->xprt) {
xprt_put(cdata->xprt);
rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
}
+ nfs_put_client(cdata->args.client);
kfree(cdata->res.impl_id);
kfree(cdata->res.server_scope);
kfree(cdata->res.server_owner);
@@ -7538,10 +7536,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
task_setup_data.callback_data = calldata;
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out_impl_id;
- }
+ if (IS_ERR(task))
+ return PTR_ERR(task);
if (!xprt) {
status = rpc_wait_for_completion_task(task);
@@ -7569,6 +7565,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
kfree(calldata->res.server_owner);
out_calldata:
kfree(calldata);
+ nfs_put_client(clp);
goto out;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index f0369e3..80ce289 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
if (len <= 0)
goto out;
dprintk("%s: name=%s\n", __func__, group_name->data);
- return NFS_ATTR_FATTR_OWNER_NAME;
+ return NFS_ATTR_FATTR_GROUP_NAME;
} else {
len = xdr_stream_decode_opaque_inline(xdr, (void **)&p,
XDR_MAX_NETOBJ);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 63f77b49..590e1e3 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds);
struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
gfp_t gfp_flags);
void nfs4_pnfs_v3_ds_connect_unload(void);
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
struct nfs4_deviceid_node *devid, unsigned int timeo,
unsigned int retrans, u32 version, u32 minor_version);
struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 9414b49..7250b95 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -745,15 +745,17 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
/*
* Create an rpc connection to the nfs4_pnfs_ds data server.
* Currently only supports IPv4 and IPv6 addresses.
- * If connection fails, make devid unavailable.
+ * If the connection fails, mark the devid unavailable and return a negative errno.
*/
-void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
+int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
struct nfs4_deviceid_node *devid, unsigned int timeo,
unsigned int retrans, u32 version, u32 minor_version)
{
- if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
- int err = 0;
+ int err;
+again:
+ err = 0;
+ if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
if (version == 3) {
err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
retrans);
@@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
err = -EPROTONOSUPPORT;
}
- if (err)
- nfs4_mark_deviceid_unavailable(devid);
nfs4_clear_ds_conn_bit(ds);
} else {
nfs4_wait_ds_connect(ds);
+
+ /* what was waited on didn't connect AND didn't mark unavail */
+ if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
+ goto again;
}
+
+ /*
+ * At this point the ds->ds_clp should be ready, but it might have
+ * hit an error.
+ */
+ if (!err) {
+ if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
+ WARN_ON_ONCE(ds->ds_clp ||
+ !nfs4_test_deviceid_unavailable(devid));
+ return -EINVAL;
+ }
+ err = nfs_client_init_status(ds->ds_clp);
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
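With nfs4_pnfs_ds_connect() now reporting a status, each layout driver has to decide what to do when a data-server connection cannot be established. A minimal caller-side sketch of that pattern, mirroring the filelayout change earlier in this series; the variables s, ds, devid and the ret/out error path are placeholders for the driver's own bookkeeping, not part of this patch:

	status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
				      dataserver_retrans, 4,
				      s->nfs_client->cl_minorversion);
	if (status) {
		/* the DS is unreachable: mark the deviceid and let the
		 * caller fall back to the MDS */
		nfs4_mark_deviceid_unavailable(devid);
		ret = NULL;
		goto out;
	}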
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e75b056..abb2c8a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
(long long)req_offset(req));
if (status < 0) {
nfs_context_set_write_error(req->wb_context, status);
- nfs_inode_remove_request(req);
+ if (req->wb_page)
+ nfs_inode_remove_request(req);
dprintk_cont(", error = %d\n", status);
goto next;
}
@@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
* returned by the server against all stored verfs. */
if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
/* We have a match */
- nfs_inode_remove_request(req);
+ if (req->wb_page)
+ nfs_inode_remove_request(req);
dprintk_cont(" OK\n");
goto next;
}
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index d04547f..eb00bc1 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -125,6 +125,8 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
+ int size);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff..96b45cd 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,93 @@ xfs_dir2_sf_check(
}
#endif /* DEBUG */
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *sfp,
+ int size)
+{
+ struct xfs_dir2_sf_entry *sfep;
+ struct xfs_dir2_sf_entry *next_sfep;
+ char *endp;
+ const struct xfs_dir_ops *dops;
+ xfs_ino_t ino;
+ int i;
+ int i8count;
+ int offset;
+ __uint8_t filetype;
+
+ dops = xfs_dir_get_ops(mp, NULL);
+
+ /*
+ * Give up if the directory is way too short.
+ */
+ XFS_WANT_CORRUPTED_RETURN(mp, size >
+ offsetof(struct xfs_dir2_sf_hdr, parent));
+ XFS_WANT_CORRUPTED_RETURN(mp, size >=
+ xfs_dir2_sf_hdr_size(sfp->i8count));
+
+ endp = (char *)sfp + size;
+
+ /* Check .. entry */
+ ino = dops->sf_get_parent_ino(sfp);
+ i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+ XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+ offset = dops->data_first_offset;
+
+ /* Check all reported entries */
+ sfep = xfs_dir2_sf_firstentry(sfp);
+ for (i = 0; i < sfp->count; i++) {
+ /*
+ * struct xfs_dir2_sf_entry has a variable length.
+ * Check the fixed-offset parts of the structure are
+ * within the data buffer.
+ */
+ XFS_WANT_CORRUPTED_RETURN(mp,
+ ((char *)sfep + sizeof(*sfep)) < endp);
+
+ /* Don't allow names with known bad length. */
+ XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
+ XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+
+ /*
+ * Check that the variable-length part of the structure is
+ * within the data buffer. The next entry starts after the
+ * name component, so nextentry is an acceptable test.
+ */
+ next_sfep = dops->sf_nextentry(sfp, sfep);
+ XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+
+ /* Check that the offsets always increase. */
+ XFS_WANT_CORRUPTED_RETURN(mp,
+ xfs_dir2_sf_get_offset(sfep) >= offset);
+
+ /* Check the inode number. */
+ ino = dops->sf_get_ino(sfp, sfep);
+ i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+ XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+
+ /* Check the file type. */
+ filetype = dops->sf_get_ftype(sfep);
+ XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+
+ offset = xfs_dir2_sf_get_offset(sfep) +
+ dops->data_entsize(sfep->namelen);
+
+ sfep = next_sfep;
+ }
+ XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
+ XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+
+ /* Make sure the whole directory is small enough to be in local format. */
+ XFS_WANT_CORRUPTED_RETURN(mp, offset +
+ (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+
+ return 0;
+}
+
/*
* Create a new (shortform) directory.
*/
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e07..9653e96 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
#include "xfs_trace.h"
#include "xfs_attr_sf.h"
#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
kmem_zone_t *xfs_ifork_zone;
@@ -320,6 +322,7 @@ xfs_iformat_local(
int whichfork,
int size)
{
+ int error;
/*
* If the size is unreasonable, then something
@@ -336,6 +339,14 @@ xfs_iformat_local(
return -EFSCORRUPTED;
}
+ if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+ error = xfs_dir2_sf_verify(ip->i_mount,
+ (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
+ size);
+ if (error)
+ return error;
+ }
+
xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
return 0;
}
@@ -856,7 +867,7 @@ xfs_iextents_copy(
* In these cases, the format always takes precedence, because the
* format indicates the current state of the fork.
*/
-void
+int
xfs_iflush_fork(
xfs_inode_t *ip,
xfs_dinode_t *dip,
@@ -866,6 +877,7 @@ xfs_iflush_fork(
char *cp;
xfs_ifork_t *ifp;
xfs_mount_t *mp;
+ int error;
static const short brootflag[2] =
{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
static const short dataflag[2] =
@@ -874,7 +886,7 @@ xfs_iflush_fork(
{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
if (!iip)
- return;
+ return 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
* This can happen if we gave up in iformat in an error path,
@@ -882,12 +894,19 @@ xfs_iflush_fork(
*/
if (!ifp) {
ASSERT(whichfork == XFS_ATTR_FORK);
- return;
+ return 0;
}
cp = XFS_DFORK_PTR(dip, whichfork);
mp = ip->i_mount;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
case XFS_DINODE_FMT_LOCAL:
+ if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
+ error = xfs_dir2_sf_verify(mp,
+ (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
+ ifp->if_bytes);
+ if (error)
+ return error;
+ }
if ((iip->ili_fields & dataflag[whichfork]) &&
(ifp->if_bytes > 0)) {
ASSERT(ifp->if_u1.if_data != NULL);
@@ -940,6 +959,7 @@ xfs_iflush_fork(
ASSERT(0);
break;
}
+ return 0;
}
/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 7fb8365..132dc59 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
void xfs_idestroy_fork(struct xfs_inode *, int);
void xfs_idata_realloc(struct xfs_inode *, int, int);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 003a99b..ad9396e 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@ xfs_dir2_sf_getdents(
struct xfs_da_geometry *geo = args->geo;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Give up if the directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
-
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
- return -EFSCORRUPTED;
-
/*
* If the block number in the offset is out of range, we're done.
*/
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7eaf1ef..c7fe2c2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3475,6 +3475,7 @@ xfs_iflush_int(
struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_dinode *dip;
struct xfs_mount *mp = ip->i_mount;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
@@ -3557,9 +3558,14 @@ xfs_iflush_int(
if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
ip->i_d.di_flushiter = 0;
- xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
- if (XFS_IFORK_Q(ip))
- xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+ error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+ if (error)
+ return error;
+ if (XFS_IFORK_Q(ip)) {
+ error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
+ if (error)
+ return error;
+ }
xfs_inobp_check(mp, bp);
/*
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 673acda..9b05886 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
}
/* Validate the processor object's proc_id */
-bool acpi_processor_validate_proc_id(int proc_id);
+bool acpi_duplicate_processor_id(int proc_id);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
int *pcpu);
int acpi_unmap_cpu(int cpu);
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-void acpi_set_processor_mapping(void);
-
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c71dd8f..c41b8d99 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -556,7 +556,7 @@ enum ccp_engine {
* struct ccp_cmd - CCP operation request
* @entry: list element (ccp driver use only)
* @work: work element used for callbacks (ccp driver use only)
- * @ccp: CCP device to be run on (ccp driver use only)
+ * @ccp: CCP device to be run on
* @ret: operation return code (ccp driver use only)
* @flags: cmd processing flags
* @engine: CCP operation to perform
diff --git a/include/linux/device.h b/include/linux/device.h
index 30c4570e..9ef518af 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
-void assert_held_device_hotplug(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index 9ca23fc..6fdfc88 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -20,6 +20,8 @@ struct sock_exterr_skb {
struct sock_extended_err ee;
u16 addr_offset;
__be16 port;
+ u8 opt_stats:1,
+ unused:7;
};
#endif
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 547f815..10c1abf 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -87,7 +87,6 @@ struct fscrypt_operations {
unsigned int flags;
const char *key_prefix;
int (*get_context)(struct inode *, void *, size_t);
- int (*prepare_context)(struct inode *);
int (*set_context)(struct inode *, const void *, size_t, void *);
int (*dummy_context)(struct inode *);
bool (*is_encrypted)(struct inode *);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 2484b2f..933d936 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
struct fwnode_handle *child,
enum gpiod_flags flags,
const char *label);
-/* FIXME: delete this helper when users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
- const char *con_id, struct fwnode_handle *child)
-{
- return devm_fwnode_get_index_gpiod_from_child(dev, con_id,
- 0, child,
- GPIOD_ASIS,
- "?");
-}
#else /* CONFIG_GPIOLIB */
@@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
return ERR_PTR(-ENOSYS);
}
-/* FIXME: delete this when all users are switched over */
-static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
- const char *con_id, struct fwnode_handle *child)
-{
- return ERR_PTR(-ENOSYS);
-}
-
#endif /* CONFIG_GPIOLIB */
static inline
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 78d59db..88b6737 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -88,6 +88,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
+#define HWMON_T_ALARM BIT(hwmon_temp_alarm)
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 62bbf3c..970771a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -845,6 +845,13 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
+
+ /*
+ * Defer freeing the channel until all CPUs have
+ * gone through a grace period.
+ */
+ struct rcu_head rcu;
+
/*
* For performance-critical channels (storage, networking,
* etc.), Hyper-V has a mechanism to enhance the throughput
@@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);
-
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
void vmbus_setevent(struct vmbus_channel *channel);
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index 23ca4151..fa79319 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
struct config_item_type *type)
{
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&d->group, name, type);
#endif
}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 6a6de18..2e4de0d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -125,9 +125,16 @@ enum iommu_attr {
};
/* These are the possible reserved region types */
-#define IOMMU_RESV_DIRECT (1 << 0)
-#define IOMMU_RESV_RESERVED (1 << 1)
-#define IOMMU_RESV_MSI (1 << 2)
+enum iommu_resv_type {
+ /* Memory regions which must be mapped 1:1 at all times */
+ IOMMU_RESV_DIRECT,
+ /* Arbitrary "never map this or give it to a device" address ranges */
+ IOMMU_RESV_RESERVED,
+ /* Hardware MSI region (untranslated) */
+ IOMMU_RESV_MSI,
+ /* Software-managed MSI translation window */
+ IOMMU_RESV_SW_MSI,
+};
/**
* struct iommu_resv_region - descriptor for a reserved memory region
@@ -142,7 +149,7 @@ struct iommu_resv_region {
phys_addr_t start;
size_t length;
int prot;
- int type;
+ enum iommu_resv_type type;
};
#ifdef CONFIG_IOMMU_API
@@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern struct iommu_resv_region *
-iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+ enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
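Since the reserved-region type is now an enum rather than a bit flag, a driver describes each region with a single iommu_resv_type value. A hedged sketch of how a driver's get_resv_regions() callback might report a software-managed MSI window; the base address, size and the head list pointer are illustrative placeholders, not taken from this patch:

	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, 0x00100000,
					 IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;
	list_add_tail(&region->list, head);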
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 1c823be..5734480c9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -6,6 +6,7 @@
struct kmem_cache;
struct page;
struct vm_struct;
+struct task_struct;
#ifdef CONFIG_KASAN
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c14ad9..d025074 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
gpa_t addr);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7e66e4f..1beb1ec 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,6 +476,7 @@ enum {
enum {
MLX4_INTERFACE_STATE_UP = 1 << 0,
MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+ MLX4_INTERFACE_STATE_NOWAIT = 1 << 2,
};
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 35d0fd7..fd0de00 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -76,22 +76,12 @@ struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
-extern int gpmc_nand_init(struct omap_nand_platform_data *d,
- struct gpmc_timings *gpmc_t);
-#else
-static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
- struct gpmc_timings *gpmc_t)
-{
- return 0;
-}
-#endif
-
#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
#else
#define board_onenand_data NULL
-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
{
+ return 0;
}
#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b6e75c9..24a6358 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -165,6 +165,13 @@ struct hw_perf_event {
struct list_head bp_list;
};
#endif
+ struct { /* amd_iommu */
+ u8 iommu_bank;
+ u8 iommu_cntr;
+ u16 padding;
+ u64 conf;
+ u64 conf1;
+ };
};
/*
* If the event is a per task event, this will point to the task in
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 86b4ed7..96fb139 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev)
static inline int reset_control_reset(struct reset_control *rstc)
{
- WARN_ON(1);
return 0;
}
static inline int reset_control_assert(struct reset_control *rstc)
{
- WARN_ON(1);
return 0;
}
static inline int reset_control_deassert(struct reset_control *rstc)
{
- WARN_ON(1);
return 0;
}
static inline int reset_control_status(struct reset_control *rstc)
{
- WARN_ON(1);
return 0;
}
static inline void reset_control_put(struct reset_control *rstc)
{
- WARN_ON(1);
}
static inline int __must_check device_reset(struct device *dev)
@@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get(
const char *id, int index, bool shared,
bool optional)
{
- return ERR_PTR(-ENOTSUPP);
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
int index, bool shared, bool optional)
{
- return ERR_PTR(-ENOTSUPP);
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_RESET_CONTROLLER */
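For consumers this means an optional reset line can be requested and used unconditionally: when the reset framework is compiled out, or the line simply is not wired up, the optional getter now returns NULL instead of an error and the stubbed calls are silent no-ops. A hedged consumer sketch, assuming the devm_reset_control_get_optional() wrapper and the companion reset-core change that lets the reset_control_* calls accept a NULL (absent) control:

	struct reset_control *rstc;

	rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);	/* real errors, e.g. -EPROBE_DEFER */

	reset_control_assert(rstc);	/* no-op when rstc is NULL */
	usleep_range(10, 20);
	reset_control_deassert(rstc);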
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043d..de2a722 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
/* device can't handle Link Power Management */
#define USB_QUIRK_NO_LPM BIT(10)
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+
#endif /* __LINUX_USB_QUIRKS_H */
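A device picks up the new quirk bit through the usual quirk table in drivers/usb/core/quirks.c; the entry below is purely illustrative, with a made-up vendor/product ID:

	/* Hypothetical device that reports bInterval in linear frames */
	{ USB_DEVICE(0x1234, 0xabcd),
	  .driver_info = USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },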
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 9638bfe..584f9a6 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -48,6 +48,8 @@ struct virtio_vsock_pkt {
struct virtio_vsock_hdr hdr;
struct work_struct work;
struct list_head list;
+ /* socket refcnt not held, only use for cancellation */
+ struct vsock_sock *vsk;
void *buf;
u32 len;
u32 off;
@@ -56,6 +58,7 @@ struct virtio_vsock_pkt {
struct virtio_vsock_pkt_info {
u32 remote_cid, remote_port;
+ struct vsock_sock *vsk;
struct msghdr *msg;
u32 pkt_len;
u16 type;
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f275896..f32ed9a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -100,6 +100,9 @@ struct vsock_transport {
void (*destruct)(struct vsock_sock *);
void (*release)(struct vsock_sock *);
+ /* Cancel all pending packets sent on vsock. */
+ int (*cancel_pkt)(struct vsock_sock *vsk);
+
/* Connections. */
int (*connect)(struct vsock_sock *);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f540f9a..19605878 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
u32 seq);
/* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
static inline struct nf_conn *nf_ct_untracked_get(void)
{
return raw_cpu_ptr(&nf_conntrack_untracked);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2aa8a9d..01360286 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -103,6 +103,35 @@ struct nft_regs {
};
};
+/* Store/load a u16 or u8 integer to/from the u32 data register.
+ *
+ * Note that when using concatenations, register allocation happens at the
+ * 32-bit level, so the store helpers zero the remaining part of the register
+ * to avoid garbage values.
+ */
+
+static inline void nft_reg_store16(u32 *dreg, u16 val)
+{
+ *dreg = 0;
+ *(u16 *)dreg = val;
+}
+
+static inline void nft_reg_store8(u32 *dreg, u8 val)
+{
+ *dreg = 0;
+ *(u8 *)dreg = val;
+}
+
+static inline u16 nft_reg_load16(u32 *sreg)
+{
+ return *(u16 *)sreg;
+}
+
+static inline u8 nft_reg_load8(u32 *sreg)
+{
+ return *(u8 *)sreg;
+}
+
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
unsigned int len)
{
@@ -203,7 +232,6 @@ struct nft_set_elem {
struct nft_set;
struct nft_set_iter {
u8 genmask;
- bool flush;
unsigned int count;
unsigned int skip;
int err;
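A short, hypothetical eval hook showing the intended use of the new register helpers; the expression and its private struct are made up for illustration and are not part of this patch:

	static void demo_expr_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
	{
		const struct demo_expr_priv *priv = nft_expr_priv(expr);
		u32 *dreg = &regs->data[priv->dreg];

		/* store a 16-bit value; the helper zeroes the rest of the
		 * 32-bit register so concatenated keys see no garbage */
		nft_reg_store16(dreg, ntohs(ipv6_hdr(pkt->skb)->payload_len));

		/* a later expression can read it back without masking */
		(void)nft_reg_load16(dreg);
	}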
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d150b50..97983d1 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
+ unsigned int flags = IP6_FH_F_AUTH;
int protohdr, thoff = 0;
unsigned short frag_off;
nft_set_pktinfo(pkt, skb, state);
- protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0) {
nft_set_pktinfo_proto_unspec(pkt, skb);
return;
@@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
const struct nf_hook_state *state)
{
#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
struct ipv6hdr *ip6h, _ip6h;
unsigned int thoff = 0;
unsigned short frag_off;
@@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
if (pkt_len + sizeof(*ip6h) > skb->len)
return -1;
- protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
if (protohdr < 0)
return -1;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 07a0b12..592dece 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -83,6 +83,7 @@ struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_ep_common;
struct crypto_shash;
+struct sctp_stream;
#include <net/sctp/tsnmap.h>
@@ -753,6 +754,8 @@ struct sctp_transport {
/* Is the Path MTU update pending on this transport */
pmtu_pending:1,
+ dst_pending_confirm:1, /* need to confirm neighbour */
+
/* Has this transport moved the ctsn since we last sacked */
sack_generation:1;
u32 dst_cookie;
@@ -806,8 +809,6 @@ struct sctp_transport {
__u32 burst_limited; /* Holds old cwnd when max.burst is applied */
- __u32 dst_pending_confirm; /* need to confirm neighbour */
-
/* Destination */
struct dst_entry *dst;
/* Source address. */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0f1813c..99e4423 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
};
struct ib_device {
+ /* Do not access @dma_device directly from ULPs or HW drivers. */
+ struct device *dma_device;
+
char name[IB_DEVICE_NAME_MAX];
struct list_head event_handler_list;
@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
*/
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
- return dma_mapping_error(&dev->dev, dma_addr);
+ return dma_mapping_error(dev->dma_device, dma_addr);
}
/**
@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- return dma_map_single(&dev->dev, cpu_addr, size, direction);
+ return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
/**
@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- dma_unmap_single(&dev->dev, addr, size, direction);
+ dma_unmap_single(dev->dma_device, addr, size, direction);
}
/**
@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
size_t size,
enum dma_data_direction direction)
{
- return dma_map_page(&dev->dev, page, offset, size, direction);
+ return dma_map_page(dev->dma_device, page, offset, size, direction);
}
/**
@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- dma_unmap_page(&dev->dev, addr, size, direction);
+ dma_unmap_page(dev->dma_device, addr, size, direction);
}
/**
@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- return dma_map_sg(&dev->dev, sg, nents, direction);
+ return dma_map_sg(dev->dma_device, sg, nents, direction);
}
/**
@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- dma_unmap_sg(&dev->dev, sg, nents, direction);
+ dma_unmap_sg(dev->dma_device, sg, nents, direction);
}
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
+ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}
/**
@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single_for_device(&dev->dev, addr, size, dir);
+ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
/**
@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
dma_addr_t *dma_handle,
gfp_t flag)
{
- return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
+ return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}
/**
@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
- dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
+ dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
/**
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b54b98d..1b0f447 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -4,7 +4,12 @@
#include <linux/types.h>
#include <target/target_core_base.h>
-#define TRANSPORT_FLAG_PASSTHROUGH 1
+#define TRANSPORT_FLAG_PASSTHROUGH 0x1
+/*
+ * ALUA commands, state checks and setup operations are handled by the
+ * backend module.
+ */
+#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
struct request_queue;
struct scatterlist;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 37c274e..4b784b6 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp {
struct list_head tg_pt_gp_lun_list;
struct se_lun *tg_pt_gp_alua_lun;
struct se_node_acl *tg_pt_gp_alua_nacl;
- struct delayed_work tg_pt_gp_transition_work;
+ struct work_struct tg_pt_gp_transition_work;
struct completion *tg_pt_gp_transition_complete;
};
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 9b1462e..a076cf1 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
#define __NR_pkey_free 290
__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 291
+__SYSCALL(__NR_statx, sys_statx)
#undef __NR_syscalls
-#define __NR_syscalls 291
+#define __NR_syscalls 292
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 407cb55..7fb9786 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -33,8 +33,8 @@ extern "C" {
#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
struct drm_omap_param {
- uint64_t param; /* in */
- uint64_t value; /* in (set_param), out (get_param) */
+ __u64 param; /* in */
+ __u64 value; /* in (set_param), out (get_param) */
};
#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
@@ -53,18 +53,18 @@ struct drm_omap_param {
#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
union omap_gem_size {
- uint32_t bytes; /* (for non-tiled formats) */
+ __u32 bytes; /* (for non-tiled formats) */
struct {
- uint16_t width;
- uint16_t height;
+ __u16 width;
+ __u16 height;
} tiled; /* (for tiled formats) */
};
struct drm_omap_gem_new {
union omap_gem_size size; /* in */
- uint32_t flags; /* in */
- uint32_t handle; /* out */
- uint32_t __pad;
+ __u32 flags; /* in */
+ __u32 handle; /* out */
+ __u32 __pad;
};
/* mask of operations: */
@@ -74,33 +74,33 @@ enum omap_gem_op {
};
struct drm_omap_gem_cpu_prep {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
+ __u32 handle; /* buffer handle (in) */
+ __u32 op; /* mask of omap_gem_op (in) */
};
struct drm_omap_gem_cpu_fini {
- uint32_t handle; /* buffer handle (in) */
- uint32_t op; /* mask of omap_gem_op (in) */
+ __u32 handle; /* buffer handle (in) */
+ __u32 op; /* mask of omap_gem_op (in) */
/* TODO maybe here we pass down info about what regions are touched
* by sw so we can be clever about cache ops? For now a placeholder,
* set to zero and we just do full buffer flush..
*/
- uint32_t nregions;
- uint32_t __pad;
+ __u32 nregions;
+ __u32 __pad;
};
struct drm_omap_gem_info {
- uint32_t handle; /* buffer handle (in) */
- uint32_t pad;
- uint64_t offset; /* mmap offset (out) */
+ __u32 handle; /* buffer handle (in) */
+ __u32 pad;
+ __u64 offset; /* mmap offset (out) */
/* note: in case of tiled buffers, the user virtual size can be
* different from the physical size (ie. how many pages are needed
* to back the object) which is returned in DRM_IOCTL_GEM_OPEN..
* This size here is the one that should be used if you want to
* mmap() the buffer:
*/
- uint32_t size; /* virtual size for mmap'ing (out) */
- uint32_t __pad;
+ __u32 size; /* virtual size for mmap'ing (out) */
+ __u32 __pad;
};
#define DRM_OMAP_GET_PARAM 0x00
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index db4c253..dcfc3a5 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -713,33 +713,6 @@ enum btrfs_err_code {
BTRFS_ERROR_DEV_ONLY_WRITABLE,
BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
};
-/* An error code to error string mapping for the kernel
-* error codes
-*/
-static inline char *btrfs_err_str(enum btrfs_err_code err_code)
-{
- switch (err_code) {
- case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET:
- return "unable to go below two devices on raid1";
- case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET:
- return "unable to go below four devices on raid10";
- case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET:
- return "unable to go below two devices on raid5";
- case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET:
- return "unable to go below three devices on raid6";
- case BTRFS_ERROR_DEV_TGT_REPLACE:
- return "unable to remove the dev_replace target dev";
- case BTRFS_ERROR_DEV_MISSING_NOT_FOUND:
- return "no missing devices found to remove";
- case BTRFS_ERROR_DEV_ONLY_WRITABLE:
- return "unable to remove the only writeable device";
- case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS:
- return "add/delete/balance/replace/resize operation "\
- "in progress";
- default:
- return NULL;
- }
-}
#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
struct btrfs_ioctl_vol_args)
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index da7cd62..0b3d308 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -34,6 +34,7 @@
#define MLX5_ABI_USER_H
#include <linux/types.h>
+#include <linux/if_ether.h> /* For ETH_ALEN. */
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req {
};
enum mlx5_lib_caps {
- MLX5_LIB_CAP_4K_UAR = (u64)1 << 0,
+ MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
};
struct mlx5_ib_alloc_ucontext_req_v2 {
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h
index ef8e2a8..6b083d3 100644
--- a/include/video/exynos5433_decon.h
+++ b/include/video/exynos5433_decon.h
@@ -46,6 +46,7 @@
#define DECON_FRAMEFIFO_STATUS 0x0524
#define DECON_CMU 0x1404
#define DECON_UPDATE 0x1410
+#define DECON_CRFMID 0x1414
#define DECON_UPDATE_SCHEME 0x1438
#define DECON_VIDCON1 0x2000
#define DECON_VIDCON2 0x2004
@@ -126,6 +127,10 @@
/* VIDINTCON0 */
#define VIDINTCON0_FRAMEDONE (1 << 17)
+#define VIDINTCON0_FRAMESEL_BP (0 << 15)
+#define VIDINTCON0_FRAMESEL_VS (1 << 15)
+#define VIDINTCON0_FRAMESEL_AC (2 << 15)
+#define VIDINTCON0_FRAMESEL_FP (3 << 15)
#define VIDINTCON0_INTFRMEN (1 << 12)
#define VIDINTCON0_INTEN (1 << 0)
@@ -142,6 +147,13 @@
#define STANDALONE_UPDATE_F (1 << 0)
/* DECON_VIDCON1 */
+#define VIDCON1_LINECNT_MASK (0x0fff << 16)
+#define VIDCON1_I80_ACTIVE (1 << 15)
+#define VIDCON1_VSTATUS_MASK (0x3 << 13)
+#define VIDCON1_VSTATUS_VS (0 << 13)
+#define VIDCON1_VSTATUS_BP (1 << 13)
+#define VIDCON1_VSTATUS_AC (2 << 13)
+#define VIDCON1_VSTATUS_FP (3 << 13)
#define VIDCON1_VCLK_MASK (0x3 << 9)
#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
#define VIDCON1_VCLK_HOLD (0x0 << 9)
diff --git a/kernel/audit.c b/kernel/audit.c
index e794544..2f4964c 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -54,6 +54,10 @@
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
#include <linux/audit.h>
@@ -90,13 +94,34 @@ static u32 audit_default;
/* If auditing cannot proceed, audit_failure selects what happens. */
static u32 audit_failure = AUDIT_FAIL_PRINTK;
-/*
- * If audit records are to be written to the netlink socket, audit_pid
- * contains the pid of the auditd process and audit_nlk_portid contains
- * the portid to use to send netlink messages to that process.
+/* private audit network namespace index */
+static unsigned int audit_net_id;
+
+/**
+ * struct audit_net - audit private network namespace data
+ * @sk: communication socket
*/
-int audit_pid;
-static __u32 audit_nlk_portid;
+struct audit_net {
+ struct sock *sk;
+};
+
+/**
+ * struct auditd_connection - kernel/auditd connection state
+ * @pid: auditd PID
+ * @portid: netlink portid
+ * @net: the associated network namespace
+ * @lock: spinlock to protect write access
+ *
+ * Description:
+ * This struct is RCU protected; you must either hold the RCU read lock for reading
+ * or the included spinlock for writing.
+ */
+static struct auditd_connection {
+ int pid;
+ u32 portid;
+ struct net *net;
+ spinlock_t lock;
+} auditd_conn;
/* If audit_rate_limit is non-zero, limit the rate of sending audit records
* to that number per second. This prevents DoS attacks, but results in
@@ -123,10 +148,6 @@ u32 audit_sig_sid = 0;
*/
static atomic_t audit_lost = ATOMIC_INIT(0);
-/* The netlink socket. */
-static struct sock *audit_sock;
-static unsigned int audit_net_id;
-
/* Hash for inode-based rules */
struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist);
/* queue msgs to send via kauditd_task */
static struct sk_buff_head audit_queue;
+static void kauditd_hold_skb(struct sk_buff *skb);
/* queue msgs due to temporary unicast send problems */
static struct sk_buff_head audit_retry_queue;
/* queue msgs waiting for new auditd connection */
@@ -192,6 +214,43 @@ struct audit_reply {
struct sk_buff *skb;
};
+/**
+ * auditd_test_task - Check to see if a given task is an audit daemon
+ * @task: the task to check
+ *
+ * Description:
+ * Return 1 if the task is a registered audit daemon, 0 otherwise.
+ */
+int auditd_test_task(const struct task_struct *task)
+{
+ int rc;
+
+ rcu_read_lock();
+ rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * audit_get_sk - Return the audit socket for the given network namespace
+ * @net: the destination network namespace
+ *
+ * Description:
+ * Returns the sock pointer if valid, NULL otherwise. The caller must ensure
+ * that a reference is held for the network namespace while the sock is in use.
+ */
+static struct sock *audit_get_sk(const struct net *net)
+{
+ struct audit_net *aunet;
+
+ if (!net)
+ return NULL;
+
+ aunet = net_generic(net, audit_net_id);
+ return aunet->sk;
+}
+
static void audit_set_portid(struct audit_buffer *ab, __u32 portid)
{
if (ab) {
@@ -210,9 +269,7 @@ void audit_panic(const char *message)
pr_err("%s\n", message);
break;
case AUDIT_FAIL_PANIC:
- /* test audit_pid since printk is always losey, why bother? */
- if (audit_pid)
- panic("audit: %s\n", message);
+ panic("audit: %s\n", message);
break;
}
}
@@ -370,21 +427,87 @@ static int audit_set_failure(u32 state)
return audit_do_config_change("audit_failure", &audit_failure, state);
}
-/*
- * For one reason or another this nlh isn't getting delivered to the userspace
- * audit daemon, just send it to printk.
+/**
+ * auditd_set - Set/Reset the auditd connection state
+ * @pid: auditd PID
+ * @portid: auditd netlink portid
+ * @net: auditd network namespace pointer
+ *
+ * Description:
+ * This function will obtain and drop network namespace references as
+ * necessary.
+ */
+static void auditd_set(int pid, u32 portid, struct net *net)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&auditd_conn.lock, flags);
+ auditd_conn.pid = pid;
+ auditd_conn.portid = portid;
+ if (auditd_conn.net)
+ put_net(auditd_conn.net);
+ if (net)
+ auditd_conn.net = get_net(net);
+ else
+ auditd_conn.net = NULL;
+ spin_unlock_irqrestore(&auditd_conn.lock, flags);
+}
+
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+ struct sk_buff *skb;
+
+ /* if it isn't already broken, break the connection */
+ rcu_read_lock();
+ if (auditd_conn.pid)
+ auditd_set(0, 0, NULL);
+ rcu_read_unlock();
+
+ /* flush all of the main and retry queues to the hold queue */
+ while ((skb = skb_dequeue(&audit_retry_queue)))
+ kauditd_hold_skb(skb);
+ while ((skb = skb_dequeue(&audit_queue)))
+ kauditd_hold_skb(skb);
+}
+
+/**
+ * kauditd_printk_skb - Print the audit record to the ring buffer
+ * @skb: audit record
+ *
+ * Whatever the reason, this packet may not make it to the auditd connection,
+ * so write it via printk so that the information isn't completely lost.
*/
static void kauditd_printk_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
char *data = nlmsg_data(nlh);
- if (nlh->nlmsg_type != AUDIT_EOE) {
- if (printk_ratelimit())
- pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
- else
- audit_log_lost("printk limit exceeded");
- }
+ if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit())
+ pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
+}
+
+/**
+ * kauditd_rehold_skb - Handle an audit record send failure in the hold queue
+ * @skb: audit record
+ *
+ * Description:
+ * This should only be used by the kauditd_thread when it fails to flush the
+ * hold queue.
+ */
+static void kauditd_rehold_skb(struct sk_buff *skb)
+{
+ /* put the record back in the queue at the same place */
+ skb_queue_head(&audit_hold_queue, skb);
+
+ /* fail the auditd connection */
+ auditd_reset();
}
/**
@@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_buff *skb)
/* we have no other options - drop the message */
audit_log_lost("kauditd hold queue overflow");
kfree_skb(skb);
+
+ /* fail the auditd connection */
+ auditd_reset();
}
/**
@@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_buff *skb)
}
/**
- * auditd_reset - Disconnect the auditd connection
+ * auditd_send_unicast_skb - Send a record via unicast to auditd
+ * @skb: audit record
*
* Description:
- * Break the auditd/kauditd connection and move all the records in the retry
- * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex
- * must be held when calling this function.
+ * Send an skb to the audit daemon; returns a positive/zero value on success
+ * and a negative value on failure. In all cases the skb is consumed by this
+ * function. If the send results in -ECONNREFUSED, the connection with auditd
+ * is reset. This function may sleep, so callers should not hold any locks
+ * where this would cause a problem.
*/
-static void auditd_reset(void)
+static int auditd_send_unicast_skb(struct sk_buff *skb)
{
- struct sk_buff *skb;
+ int rc;
+ u32 portid;
+ struct net *net;
+ struct sock *sk;
- /* break the connection */
- if (audit_sock) {
- sock_put(audit_sock);
- audit_sock = NULL;
+ /* NOTE: we can't call netlink_unicast while in the RCU section, so
+ * take a reference to the network namespace and grab local
+ * copies of the namespace, the sock, and the portid; the
+ * namespace and sock aren't going to go away while we hold a
+ * reference, and if the portid does become invalid after the RCU
+ * section, netlink_unicast() should safely return an error */
+
+ rcu_read_lock();
+ if (!auditd_conn.pid) {
+ rcu_read_unlock();
+ rc = -ECONNREFUSED;
+ goto err;
}
- audit_pid = 0;
- audit_nlk_portid = 0;
+ net = auditd_conn.net;
+ get_net(net);
+ sk = audit_get_sk(net);
+ portid = auditd_conn.portid;
+ rcu_read_unlock();
- /* flush all of the retry queue to the hold queue */
- while ((skb = skb_dequeue(&audit_retry_queue)))
- kauditd_hold_skb(skb);
+ rc = netlink_unicast(sk, skb, portid, 0);
+ put_net(net);
+ if (rc < 0)
+ goto err;
+
+ return rc;
+
+err:
+ if (rc == -ECONNREFUSED)
+ auditd_reset();
+ return rc;
}
/**
- * kauditd_send_unicast_skb - Send a record via unicast to auditd
- * @skb: audit record
+ * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
+ * @sk: the sending sock
+ * @portid: the netlink destination
+ * @queue: the skb queue to process
+ * @retry_limit: limit on number of netlink unicast failures
+ * @skb_hook: per-skb hook for additional processing
+ * @err_hook: hook called if the skb fails the netlink unicast send
+ *
+ * Description:
+ * Run through the given queue and attempt to send the audit records to
+ * auditd; returns zero on success and a negative value on failure. It is up
+ * to the caller to ensure that @sk remains valid for the duration of this
+ * function.
+ */
-static int kauditd_send_unicast_skb(struct sk_buff *skb)
+static int kauditd_send_queue(struct sock *sk, u32 portid,
+ struct sk_buff_head *queue,
+ unsigned int retry_limit,
+ void (*skb_hook)(struct sk_buff *skb),
+ void (*err_hook)(struct sk_buff *skb))
{
- int rc;
+ int rc = 0;
+ struct sk_buff *skb;
+ static unsigned int failed = 0;
- /* if we know nothing is connected, don't even try the netlink call */
- if (!audit_pid)
- return -ECONNREFUSED;
+ /* NOTE: kauditd_thread takes care of all our locking, we just use
+ * the netlink info passed to us (e.g. sk and portid) */
- /* get an extra skb reference in case we fail to send */
- skb_get(skb);
- rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
- if (rc >= 0) {
- consume_skb(skb);
- rc = 0;
+ while ((skb = skb_dequeue(queue))) {
+ /* call the skb_hook for each skb we touch */
+ if (skb_hook)
+ (*skb_hook)(skb);
+
+ /* can we send to anyone via unicast? */
+ if (!sk) {
+ if (err_hook)
+ (*err_hook)(skb);
+ continue;
+ }
+
+ /* grab an extra skb reference in case of error */
+ skb_get(skb);
+ rc = netlink_unicast(sk, skb, portid, 0);
+ if (rc < 0) {
+ /* fatal failure for our queue flush attempt? */
+ if (++failed >= retry_limit ||
+ rc == -ECONNREFUSED || rc == -EPERM) {
+ /* yes - error processing for the queue */
+ sk = NULL;
+ if (err_hook)
+ (*err_hook)(skb);
+ if (!skb_hook)
+ goto out;
+ /* keep processing with the skb_hook */
+ continue;
+ } else
+ /* no - requeue to preserve ordering */
+ skb_queue_head(queue, skb);
+ } else {
+ /* it worked - drop the extra reference and continue */
+ consume_skb(skb);
+ failed = 0;
+ }
}
- return rc;
+out:
+ return (rc >= 0 ? 0 : rc);
}
/*
@@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(struct sk_buff *skb)
* @skb: audit record
*
* Description:
- * This function doesn't consume an skb as might be expected since it has to
- * copy it anyways.
+ * Write a multicast message to anyone listening in the initial network
+ * namespace. This function doesn't consume the skb, as might be expected,
+ * since it has to copy it anyway.
*/
static void kauditd_send_multicast_skb(struct sk_buff *skb)
{
struct sk_buff *copy;
- struct audit_net *aunet = net_generic(&init_net, audit_net_id);
- struct sock *sock = aunet->nlsk;
+ struct sock *sock = audit_get_sk(&init_net);
struct nlmsghdr *nlh;
+ /* NOTE: we are not taking an additional reference for init_net since
+ * we don't have to worry about it going away */
+
if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
return;
@@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
}
/**
- * kauditd_wake_condition - Return true when it is time to wake kauditd_thread
- *
- * Description:
- * This function is for use by the wait_event_freezable() call in
- * kauditd_thread().
+ * kauditd_thread - Worker thread to send audit records to userspace
+ * @dummy: unused
*/
-static int kauditd_wake_condition(void)
-{
- static int pid_last = 0;
- int rc;
- int pid = audit_pid;
-
- /* wake on new messages or a change in the connected auditd */
- rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
- if (rc)
- pid_last = pid;
-
- return rc;
-}
-
static int kauditd_thread(void *dummy)
{
int rc;
- int auditd = 0;
- int reschedule = 0;
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
+ u32 portid = 0;
+ struct net *net = NULL;
+ struct sock *sk = NULL;
#define UNICAST_RETRIES 5
-#define AUDITD_BAD(x,y) \
- ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
-
- /* NOTE: we do invalidate the auditd connection flag on any sending
- * errors, but we only "restore" the connection flag at specific places
- * in the loop in order to help ensure proper ordering of audit
- * records */
set_freezable();
while (!kthread_should_stop()) {
- /* NOTE: possible area for future improvement is to look at
- * the hold and retry queues, since only this thread
- * has access to these queues we might be able to do
- * our own queuing and skip some/all of the locking */
-
- /* NOTE: it might be a fun experiment to split the hold and
- * retry queue handling to another thread, but the
- * synchronization issues and other overhead might kill
- * any performance gains */
+ /* NOTE: see the lock comments in auditd_send_unicast_skb() */
+ rcu_read_lock();
+ if (!auditd_conn.pid) {
+ rcu_read_unlock();
+ goto main_queue;
+ }
+ net = auditd_conn.net;
+ get_net(net);
+ sk = audit_get_sk(net);
+ portid = auditd_conn.portid;
+ rcu_read_unlock();
/* attempt to flush the hold queue */
- while (auditd && (skb = skb_dequeue(&audit_hold_queue))) {
- rc = kauditd_send_unicast_skb(skb);
- if (rc) {
- /* requeue to the same spot */
- skb_queue_head(&audit_hold_queue, skb);
-
- auditd = 0;
- if (AUDITD_BAD(rc, reschedule)) {
- mutex_lock(&audit_cmd_mutex);
- auditd_reset();
- mutex_unlock(&audit_cmd_mutex);
- reschedule = 0;
- }
- } else
- /* we were able to send successfully */
- reschedule = 0;
+ rc = kauditd_send_queue(sk, portid,
+ &audit_hold_queue, UNICAST_RETRIES,
+ NULL, kauditd_rehold_skb);
+ if (rc < 0) {
+ sk = NULL;
+ goto main_queue;
}
/* attempt to flush the retry queue */
- while (auditd && (skb = skb_dequeue(&audit_retry_queue))) {
- rc = kauditd_send_unicast_skb(skb);
- if (rc) {
- auditd = 0;
- if (AUDITD_BAD(rc, reschedule)) {
- kauditd_hold_skb(skb);
- mutex_lock(&audit_cmd_mutex);
- auditd_reset();
- mutex_unlock(&audit_cmd_mutex);
- reschedule = 0;
- } else
- /* temporary problem (we hope), queue
- * to the same spot and retry */
- skb_queue_head(&audit_retry_queue, skb);
- } else
- /* we were able to send successfully */
- reschedule = 0;
+ rc = kauditd_send_queue(sk, portid,
+ &audit_retry_queue, UNICAST_RETRIES,
+ NULL, kauditd_hold_skb);
+ if (rc < 0) {
+ sk = NULL;
+ goto main_queue;
}
- /* standard queue processing, try to be as quick as possible */
-quick_loop:
- skb = skb_dequeue(&audit_queue);
- if (skb) {
- /* setup the netlink header, see the comments in
- * kauditd_send_multicast_skb() for length quirks */
- nlh = nlmsg_hdr(skb);
- nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
+main_queue:
+ /* process the main queue - do the multicast send and attempt
+ * unicast, dump failed record sends to the retry queue; if
+ * sk == NULL due to previous failures we will just do the
+ * multicast send and move the record to the retry queue */
+ kauditd_send_queue(sk, portid, &audit_queue, 1,
+ kauditd_send_multicast_skb,
+ kauditd_retry_skb);
- /* attempt to send to any multicast listeners */
- kauditd_send_multicast_skb(skb);
-
- /* attempt to send to auditd, queue on failure */
- if (auditd) {
- rc = kauditd_send_unicast_skb(skb);
- if (rc) {
- auditd = 0;
- if (AUDITD_BAD(rc, reschedule)) {
- mutex_lock(&audit_cmd_mutex);
- auditd_reset();
- mutex_unlock(&audit_cmd_mutex);
- reschedule = 0;
- }
-
- /* move to the retry queue */
- kauditd_retry_skb(skb);
- } else
- /* everything is working so go fast! */
- goto quick_loop;
- } else if (reschedule)
- /* we are currently having problems, move to
- * the retry queue */
- kauditd_retry_skb(skb);
- else
- /* dump the message via printk and hold it */
- kauditd_hold_skb(skb);
- } else {
- /* we have flushed the backlog so wake everyone */
- wake_up(&audit_backlog_wait);
-
- /* if everything is okay with auditd (if present), go
- * to sleep until there is something new in the queue
- * or we have a change in the connected auditd;
- * otherwise simply reschedule to give things a chance
- * to recover */
- if (reschedule) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- } else
- wait_event_freezable(kauditd_wait,
- kauditd_wake_condition());
-
- /* update the auditd connection status */
- auditd = (audit_pid ? 1 : 0);
+ /* drop our netns reference, no auditd sends past this line */
+ if (net) {
+ put_net(net);
+ net = NULL;
}
+ sk = NULL;
+
+ /* we have processed all the queues so wake everyone */
+ wake_up(&audit_backlog_wait);
+
+ /* NOTE: we want to wake up if there is anything on the queue,
+ * regardless of whether an auditd is connected, as we need to
+ * do the multicast send and rotate records from the
+ * main queue to the retry/hold queues */
+ wait_event_freezable(kauditd_wait,
+ (skb_queue_len(&audit_queue) ? 1 : 0));
}
return 0;
@@ -678,17 +804,16 @@ int audit_send_list(void *_dest)
{
struct audit_netlink_list *dest = _dest;
struct sk_buff *skb;
- struct net *net = dest->net;
- struct audit_net *aunet = net_generic(net, audit_net_id);
+ struct sock *sk = audit_get_sk(dest->net);
/* wait for parent to finish and send an ACK */
mutex_lock(&audit_cmd_mutex);
mutex_unlock(&audit_cmd_mutex);
while ((skb = __skb_dequeue(&dest->q)) != NULL)
- netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
+ netlink_unicast(sk, skb, dest->portid, 0);
- put_net(net);
+ put_net(dest->net);
kfree(dest);
return 0;
@@ -722,16 +847,15 @@ struct sk_buff *audit_make_reply(__u32 portid, int seq, int type, int done,
static int audit_send_reply_thread(void *arg)
{
struct audit_reply *reply = (struct audit_reply *)arg;
- struct net *net = reply->net;
- struct audit_net *aunet = net_generic(net, audit_net_id);
+ struct sock *sk = audit_get_sk(reply->net);
mutex_lock(&audit_cmd_mutex);
mutex_unlock(&audit_cmd_mutex);
/* Ignore failure. It'll only happen if the sender goes away,
because our timeout is set to infinite. */
- netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
- put_net(net);
+ netlink_unicast(sk, reply->skb, reply->portid, 0);
+ put_net(reply->net);
kfree(reply);
return 0;
}
@@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_buff *skb)
static int audit_replace(pid_t pid)
{
- struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
- &pid, sizeof(pid));
+ struct sk_buff *skb;
+ skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid));
if (!skb)
return -ENOMEM;
- return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+ return auditd_send_unicast_skb(skb);
}
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
memset(&s, 0, sizeof(s));
s.enabled = audit_enabled;
s.failure = audit_failure;
- s.pid = audit_pid;
+ rcu_read_lock();
+ s.pid = auditd_conn.pid;
+ rcu_read_unlock();
s.rate_limit = audit_rate_limit;
s.backlog_limit = audit_backlog_limit;
s.lost = atomic_read(&audit_lost);
@@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
* from the initial pid namespace, but something
* to keep in mind if this changes */
int new_pid = s.pid;
+ pid_t auditd_pid;
pid_t requesting_pid = task_tgid_vnr(current);
- if ((!new_pid) && (requesting_pid != audit_pid)) {
- audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
+ /* test the auditd connection */
+ audit_replace(requesting_pid);
+
+ rcu_read_lock();
+ auditd_pid = auditd_conn.pid;
+ /* only the current auditd can unregister itself */
+ if ((!new_pid) && (requesting_pid != auditd_pid)) {
+ rcu_read_unlock();
+ audit_log_config_change("audit_pid", new_pid,
+ auditd_pid, 0);
return -EACCES;
}
- if (audit_pid && new_pid &&
- audit_replace(requesting_pid) != -ECONNREFUSED) {
- audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
+ /* replacing a healthy auditd is not allowed */
+ if (auditd_pid && new_pid) {
+ rcu_read_unlock();
+ audit_log_config_change("audit_pid", new_pid,
+ auditd_pid, 0);
return -EEXIST;
}
+ rcu_read_unlock();
+
if (audit_enabled != AUDIT_OFF)
- audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
+ audit_log_config_change("audit_pid", new_pid,
+ auditd_pid, 1);
+
if (new_pid) {
- if (audit_sock)
- sock_put(audit_sock);
- audit_pid = new_pid;
- audit_nlk_portid = NETLINK_CB(skb).portid;
- sock_hold(skb->sk);
- audit_sock = skb->sk;
- } else {
+ /* register a new auditd connection */
+ auditd_set(new_pid,
+ NETLINK_CB(skb).portid,
+ sock_net(NETLINK_CB(skb).sk));
+ /* try to process any backlog */
+ wake_up_interruptible(&kauditd_wait);
+ } else
+ /* unregister the auditd connection */
auditd_reset();
- }
- wake_up_interruptible(&kauditd_wait);
}
if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
err = audit_set_rate_limit(s.rate_limit);
@@ -1090,7 +1230,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (err)
break;
}
- mutex_unlock(&audit_cmd_mutex);
audit_log_common_recv_msg(&ab, msg_type);
if (msg_type != AUDIT_USER_TTY)
audit_log_format(ab, " msg='%.*s'",
@@ -1108,7 +1247,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
audit_set_portid(ab, NETLINK_CB(skb).portid);
audit_log_end(ab);
- mutex_lock(&audit_cmd_mutex);
}
break;
case AUDIT_ADD_RULE:
@@ -1298,26 +1436,26 @@ static int __net_init audit_net_init(struct net *net)
struct audit_net *aunet = net_generic(net, audit_net_id);
- aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
- if (aunet->nlsk == NULL) {
+ aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
+ if (aunet->sk == NULL) {
audit_panic("cannot initialize netlink socket in namespace");
return -ENOMEM;
}
- aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+
return 0;
}
static void __net_exit audit_net_exit(struct net *net)
{
struct audit_net *aunet = net_generic(net, audit_net_id);
- struct sock *sock = aunet->nlsk;
- mutex_lock(&audit_cmd_mutex);
- if (sock == audit_sock)
- auditd_reset();
- mutex_unlock(&audit_cmd_mutex);
- netlink_kernel_release(sock);
- aunet->nlsk = NULL;
+ rcu_read_lock();
+ if (net == auditd_conn.net)
+ auditd_reset();
+ rcu_read_unlock();
+
+ netlink_kernel_release(aunet->sk);
}
static struct pernet_operations audit_net_ops __net_initdata = {
@@ -1335,20 +1473,24 @@ static int __init audit_init(void)
if (audit_initialized == AUDIT_DISABLED)
return 0;
- pr_info("initializing netlink subsys (%s)\n",
- audit_default ? "enabled" : "disabled");
- register_pernet_subsys(&audit_net_ops);
+ memset(&auditd_conn, 0, sizeof(auditd_conn));
+ spin_lock_init(&auditd_conn.lock);
skb_queue_head_init(&audit_queue);
skb_queue_head_init(&audit_retry_queue);
skb_queue_head_init(&audit_hold_queue);
- audit_initialized = AUDIT_INITIALIZED;
- audit_enabled = audit_default;
- audit_ever_enabled |= !!audit_default;
for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
INIT_LIST_HEAD(&audit_inode_hash[i]);
+ pr_info("initializing netlink subsys (%s)\n",
+ audit_default ? "enabled" : "disabled");
+ register_pernet_subsys(&audit_net_ops);
+
+ audit_initialized = AUDIT_INITIALIZED;
+ audit_enabled = audit_default;
+ audit_ever_enabled |= !!audit_default;
+
kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
if (IS_ERR(kauditd_task)) {
int err = PTR_ERR(kauditd_task);
@@ -1519,20 +1661,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
return NULL;
- /* don't ever fail/sleep on these two conditions:
+ /* NOTE: don't ever fail/sleep on these two conditions:
* 1. auditd generated record - since we need auditd to drain the
* queue; also, when we are checking for auditd, compare PIDs using
* task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
* using a PID anchored in the caller's namespace
- * 2. audit command message - record types 1000 through 1099 inclusive
- * are command messages/records used to manage the kernel subsystem
- * and the audit userspace, blocking on these messages could cause
- * problems under load so don't do it (note: not all of these
- * command types are valid as record types, but it is quicker to
- * just check two ints than a series of ints in a if/switch stmt) */
- if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
- (type >= 1000 && type <= 1099))) {
- long sleep_time = audit_backlog_wait_time;
+ * 2. generator holding the audit_cmd_mutex - we don't want to block
+ * while holding the mutex */
+ if (!(auditd_test_task(current) ||
+ (current == __mutex_owner(&audit_cmd_mutex)))) {
+ long stime = audit_backlog_wait_time;
while (audit_backlog_limit &&
(skb_queue_len(&audit_queue) > audit_backlog_limit)) {
@@ -1541,14 +1679,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
/* sleep if we are allowed and we haven't exhausted our
* backlog wait limit */
- if ((gfp_mask & __GFP_DIRECT_RECLAIM) &&
- (sleep_time > 0)) {
+ if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&audit_backlog_wait,
&wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- sleep_time = schedule_timeout(sleep_time);
+ stime = schedule_timeout(stime);
remove_wait_queue(&audit_backlog_wait, &wait);
} else {
if (audit_rate_check() && printk_ratelimit())
@@ -2127,15 +2264,27 @@ void audit_log_link_denied(const char *operation, const struct path *link)
*/
void audit_log_end(struct audit_buffer *ab)
{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+
if (!ab)
return;
- if (!audit_rate_check()) {
- audit_log_lost("rate limit exceeded");
- } else {
- skb_queue_tail(&audit_queue, ab->skb);
- wake_up_interruptible(&kauditd_wait);
+
+ if (audit_rate_check()) {
+ skb = ab->skb;
ab->skb = NULL;
- }
+
+ /* setup the netlink header, see the comments in
+ * kauditd_send_multicast_skb() for length quirks */
+ nlh = nlmsg_hdr(skb);
+ nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
+
+ /* queue the netlink packet and poke the kauditd thread */
+ skb_queue_tail(&audit_queue, skb);
+ wake_up_interruptible(&kauditd_wait);
+ } else
+ audit_log_lost("rate limit exceeded");
+
audit_buffer_free(ab);
}
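
The refactor above funnels the hold, retry and main queues through the single kauditd_send_queue() helper. As a rough userspace sketch of that flush pattern (the list type, send callback and hooks below are hypothetical stand-ins, not kernel APIs):

#include <stdlib.h>

struct rec { int id; struct rec *next; };

static struct rec *dequeue(struct rec **q)
{
	struct rec *r = *q;

	if (r)
		*q = r->next;
	return r;
}

static void requeue_head(struct rec **q, struct rec *r)
{
	r->next = *q;
	*q = r;
}

static int flush_queue(struct rec **q, int (*send)(struct rec *),
		       void (*err_hook)(struct rec *), unsigned int retry_limit)
{
	unsigned int failed = 0;
	struct rec *r;

	while ((r = dequeue(q))) {
		if (send(r) < 0) {
			if (++failed >= retry_limit) {
				if (err_hook)
					err_hook(r); /* e.g. park it on a hold queue */
				return -1;
			}
			requeue_head(q, r); /* transient error: keep ordering */
		} else {
			free(r); /* delivered */
			failed = 0;
		}
	}
	return 0;
}

The key point is that a transient failure requeues at the head so record ordering survives, while hitting the retry limit hands the record to the error hook instead of dropping it.
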
diff --git a/kernel/audit.h b/kernel/audit.h
index ca57988..0f1cf6d 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context,
struct audit_names *n, const struct path *path,
int record_num, int *call_panic);
-extern int audit_pid;
+extern int auditd_test_task(const struct task_struct *task);
#define AUDIT_INODE_BUCKETS 32
extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -250,10 +250,6 @@ struct audit_netlink_list {
int audit_send_list(void *);
-struct audit_net {
- struct sock *nlsk;
-};
-
extern int selinux_audit_rule_update(void);
extern struct mutex audit_filter_mutex;
@@ -340,8 +336,7 @@ extern int audit_filter(int msgtype, unsigned int listtype);
extern int __audit_signal_info(int sig, struct task_struct *t);
static inline int audit_signal_info(int sig, struct task_struct *t)
{
- if (unlikely((audit_pid && t->tgid == audit_pid) ||
- (audit_signals && !audit_dummy_context())))
+ if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
return __audit_signal_info(sig, t);
return 0;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index d6a8de5..e59ffc7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
struct audit_entry *e;
enum audit_state state;
- if (audit_pid && tsk->tgid == audit_pid)
+ if (auditd_test_task(tsk))
return AUDIT_DISABLED;
rcu_read_lock();
@@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
{
struct audit_names *n;
- if (audit_pid && tsk->tgid == audit_pid)
+ if (auditd_test_task(tsk))
return;
rcu_read_lock();
@@ -2256,7 +2256,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
struct audit_context *ctx = tsk->audit_context;
kuid_t uid = current_uid(), t_uid = task_uid(t);
- if (audit_pid && t->tgid == audit_pid) {
+ if (auditd_test_task(t)) {
if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
audit_sig_pid = task_tgid_nr(tsk);
if (uid_valid(tsk->loginuid))
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index afe5bab..361a69d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -30,18 +30,12 @@ struct bpf_htab {
struct pcpu_freelist freelist;
struct bpf_lru lru;
};
- void __percpu *extra_elems;
+ struct htab_elem *__percpu *extra_elems;
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
};
-enum extra_elem_state {
- HTAB_NOT_AN_EXTRA_ELEM = 0,
- HTAB_EXTRA_ELEM_FREE,
- HTAB_EXTRA_ELEM_USED
-};
-
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
@@ -56,7 +50,6 @@ struct htab_elem {
};
union {
struct rcu_head rcu;
- enum extra_elem_state state;
struct bpf_lru_node lru_node;
};
u32 hash;
@@ -77,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}
+static bool htab_is_prealloc(const struct bpf_htab *htab)
+{
+ return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
+}
+
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
void __percpu *pptr)
{
@@ -128,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
static int prealloc_init(struct bpf_htab *htab)
{
+ u32 num_entries = htab->map.max_entries;
int err = -ENOMEM, i;
- htab->elems = bpf_map_area_alloc(htab->elem_size *
- htab->map.max_entries);
+ if (!htab_is_percpu(htab) && !htab_is_lru(htab))
+ num_entries += num_possible_cpus();
+
+ htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
if (!htab->elems)
return -ENOMEM;
if (!htab_is_percpu(htab))
goto skip_percpu_elems;
- for (i = 0; i < htab->map.max_entries; i++) {
+ for (i = 0; i < num_entries; i++) {
u32 size = round_up(htab->map.value_size, 8);
void __percpu *pptr;
@@ -166,11 +167,11 @@ static int prealloc_init(struct bpf_htab *htab)
if (htab_is_lru(htab))
bpf_lru_populate(&htab->lru, htab->elems,
offsetof(struct htab_elem, lru_node),
- htab->elem_size, htab->map.max_entries);
+ htab->elem_size, num_entries);
else
pcpu_freelist_populate(&htab->freelist,
htab->elems + offsetof(struct htab_elem, fnode),
- htab->elem_size, htab->map.max_entries);
+ htab->elem_size, num_entries);
return 0;
@@ -191,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab)
static int alloc_extra_elems(struct bpf_htab *htab)
{
- void __percpu *pptr;
+ struct htab_elem *__percpu *pptr, *l_new;
+ struct pcpu_freelist_node *l;
int cpu;
- pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
+ pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
+ GFP_USER | __GFP_NOWARN);
if (!pptr)
return -ENOMEM;
for_each_possible_cpu(cpu) {
- ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
- HTAB_EXTRA_ELEM_FREE;
+ l = pcpu_freelist_pop(&htab->freelist);
+ /* pop will succeed, since prealloc_init()
+ * preallocated extra num_possible_cpus elements
+ */
+ l_new = container_of(l, struct htab_elem, fnode);
+ *per_cpu_ptr(pptr, cpu) = l_new;
}
htab->extra_elems = pptr;
return 0;
@@ -342,25 +349,25 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
raw_spin_lock_init(&htab->buckets[i].lock);
}
- if (!percpu && !lru) {
- /* lru itself can remove the least used element, so
- * there is no need for an extra elem during map_update.
- */
- err = alloc_extra_elems(htab);
- if (err)
- goto free_buckets;
- }
-
if (prealloc) {
err = prealloc_init(htab);
if (err)
- goto free_extra_elems;
+ goto free_buckets;
+
+ if (!percpu && !lru) {
+ /* lru itself can remove the least used element, so
+ * there is no need for an extra elem during map_update.
+ */
+ err = alloc_extra_elems(htab);
+ if (err)
+ goto free_prealloc;
+ }
}
return &htab->map;
-free_extra_elems:
- free_percpu(htab->extra_elems);
+free_prealloc:
+ prealloc_destroy(htab);
free_buckets:
bpf_map_area_free(htab->buckets);
free_htab:
@@ -575,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
- if (l->state == HTAB_EXTRA_ELEM_USED) {
- l->state = HTAB_EXTRA_ELEM_FREE;
- return;
- }
-
- if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
+ if (htab_is_prealloc(htab)) {
pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
atomic_dec(&htab->count);
@@ -610,47 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
void *value, u32 key_size, u32 hash,
bool percpu, bool onallcpus,
- bool old_elem_exists)
+ struct htab_elem *old_elem)
{
u32 size = htab->map.value_size;
- bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
- struct htab_elem *l_new;
+ bool prealloc = htab_is_prealloc(htab);
+ struct htab_elem *l_new, **pl_new;
void __percpu *pptr;
- int err = 0;
if (prealloc) {
- struct pcpu_freelist_node *l;
-
- l = pcpu_freelist_pop(&htab->freelist);
- if (!l)
- err = -E2BIG;
- else
- l_new = container_of(l, struct htab_elem, fnode);
- } else {
- if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
- atomic_dec(&htab->count);
- err = -E2BIG;
+ if (old_elem) {
+ /* if we're updating the existing element,
+ * use per-cpu extra elems to avoid freelist_pop/push
+ */
+ pl_new = this_cpu_ptr(htab->extra_elems);
+ l_new = *pl_new;
+ *pl_new = old_elem;
} else {
- l_new = kmalloc(htab->elem_size,
- GFP_ATOMIC | __GFP_NOWARN);
- if (!l_new)
- return ERR_PTR(-ENOMEM);
+ struct pcpu_freelist_node *l;
+
+ l = pcpu_freelist_pop(&htab->freelist);
+ if (!l)
+ return ERR_PTR(-E2BIG);
+ l_new = container_of(l, struct htab_elem, fnode);
}
- }
-
- if (err) {
- if (!old_elem_exists)
- return ERR_PTR(err);
-
- /* if we're updating the existing element and the hash table
- * is full, use per-cpu extra elems
- */
- l_new = this_cpu_ptr(htab->extra_elems);
- if (l_new->state != HTAB_EXTRA_ELEM_FREE)
- return ERR_PTR(-E2BIG);
- l_new->state = HTAB_EXTRA_ELEM_USED;
} else {
- l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
+ if (atomic_inc_return(&htab->count) > htab->map.max_entries)
+ if (!old_elem) {
+ /* when map is full and update() is replacing
+ * old element, it's ok to allocate, since
+ * old element will be freed immediately.
+ * Otherwise return an error
+ */
+ atomic_dec(&htab->count);
+ return ERR_PTR(-E2BIG);
+ }
+ l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!l_new)
+ return ERR_PTR(-ENOMEM);
}
memcpy(l_new->key, key, key_size);
@@ -731,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
goto err;
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
- !!l_old);
+ l_old);
if (IS_ERR(l_new)) {
/* all pre-allocated elements are in use or memory exhausted */
ret = PTR_ERR(l_new);
@@ -744,7 +742,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
hlist_nulls_del_rcu(&l_old->hash_node);
- free_htab_elem(htab, l_old);
+ if (!htab_is_prealloc(htab))
+ free_htab_elem(htab, l_old);
}
ret = 0;
err:
@@ -856,7 +855,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
value, onallcpus);
} else {
l_new = alloc_htab_elem(htab, key, value, key_size,
- hash, true, onallcpus, false);
+ hash, true, onallcpus, NULL);
if (IS_ERR(l_new)) {
ret = PTR_ERR(l_new);
goto err;
@@ -1024,8 +1023,7 @@ static void delete_all_elements(struct bpf_htab *htab)
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
hlist_nulls_del_rcu(&l->hash_node);
- if (l->state != HTAB_EXTRA_ELEM_USED)
- htab_elem_free(htab, l);
+ htab_elem_free(htab, l);
}
}
}
@@ -1045,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map)
* not have executed. Wait for them.
*/
rcu_barrier();
- if (htab->map.map_flags & BPF_F_NO_PREALLOC)
+ if (!htab_is_prealloc(htab))
delete_all_elements(htab);
else
prealloc_destroy(htab);
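
For updates of an existing key on preallocated maps, the new alloc_htab_elem() swaps the per-CPU spare element with the element being replaced instead of going through the freelist. A minimal sketch of that swap, with a plain pointer standing in for the per-cpu pointer and hypothetical type and field names:

/* hypothetical element type standing in for struct htab_elem */
struct elem { long key; long val; };

/*
 * Reuse the preallocated spare for the new value and turn the element
 * being replaced into the next spare, so no freelist pop/push is needed
 * while holding the bucket lock.
 */
static struct elem *take_spare_for_update(struct elem **spare, struct elem *old_elem)
{
	struct elem *new_elem = *spare;

	*spare = old_elem;
	return new_elem;
}
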
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f7c0632..37b223e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1335,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
struct cpuhp_step *sp;
int ret = 0;
- mutex_lock(&cpuhp_state_mutex);
-
if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
- goto out;
+ return ret;
state = ret;
}
sp = cpuhp_get_step(state);
- if (name && sp->name) {
- ret = -EBUSY;
- goto out;
- }
+ if (name && sp->name)
+ return -EBUSY;
+
sp->startup.single = startup;
sp->teardown.single = teardown;
sp->name = name;
sp->multi_instance = multi_instance;
INIT_HLIST_HEAD(&sp->list);
-out:
- mutex_unlock(&cpuhp_state_mutex);
return ret;
}
@@ -1428,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
return -EINVAL;
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
if (!invoke || !sp->startup.multi)
goto add_node;
@@ -1447,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
if (ret) {
if (sp->teardown.multi)
cpuhp_rollback_install(cpu, state, node);
- goto err;
+ goto unlock;
}
}
add_node:
ret = 0;
- mutex_lock(&cpuhp_state_mutex);
hlist_add_head(node, &sp->list);
+unlock:
mutex_unlock(&cpuhp_state_mutex);
-
-err:
put_online_cpus();
return ret;
}
@@ -1491,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
return -EINVAL;
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
@@ -1524,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
}
}
out:
+ mutex_unlock(&cpuhp_state_mutex);
put_online_cpus();
/*
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
@@ -1547,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
return -EINVAL;
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
+
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
/*
@@ -1563,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
}
remove:
- mutex_lock(&cpuhp_state_mutex);
hlist_del(node);
mutex_unlock(&cpuhp_state_mutex);
put_online_cpus();
@@ -1571,6 +1568,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
+
/**
* __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
* @state: The state to remove
@@ -1589,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
get_online_cpus();
+ mutex_lock(&cpuhp_state_mutex);
if (sp->multi_instance) {
WARN(!hlist_empty(&sp->list),
"Error: Removing state %d which has instances left.\n",
@@ -1613,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
}
remove:
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+ mutex_unlock(&cpuhp_state_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
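
The cpu.c hunks hoist cpuhp_state_mutex out of cpuhp_store_callbacks() and into the entry points, so reserving a dynamic state and storing its callbacks happen in one critical section. A small userspace sketch of that shape, using pthreads and hypothetical names (state assumed to be within bounds):

#include <pthread.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *state_name[8];

/* helper does no locking of its own; callers hold state_mutex */
static int store_callbacks(int state, const char *name)
{
	if (state_name[state])
		return -1; /* -EBUSY analogue */
	state_name[state] = name;
	return 0;
}

static int setup_state(int state, const char *name)
{
	int ret;

	pthread_mutex_lock(&state_mutex);
	ret = store_callbacks(state, name); /* reserve and fill atomically */
	pthread_mutex_unlock(&state_mutex);

	return ret;
}
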
diff --git a/kernel/futex.c b/kernel/futex.c
index 229a744..45858ec 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
@@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
@@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
spin_unlock(q.lock_ptr);
}
} else {
+ struct rt_mutex *pi_mutex;
+
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res)
ret = (res < 0) ? res : 0;
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle
+ * the fault, unlock the rt_mutex and return the fault to
+ * userspace.
+ */
+ if (ret && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
+ if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 7bc24d4..c65f798 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -213,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
*/
if (sem->count == 0)
break;
- if (signal_pending_state(state, current)) {
- ret = -EINTR;
- goto out;
- }
+ if (signal_pending_state(state, current))
+ goto out_nolock;
+
set_current_state(state);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
schedule();
@@ -224,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
}
/* got the lock */
sem->count = -1;
-out:
list_del(&waiter.list);
raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
return ret;
+
+out_nolock:
+ list_del(&waiter.list);
+ if (!list_empty(&sem->wait_list))
+ __rwsem_do_wake(sem, 1);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ return -EINTR;
}
void __sched __down_write(struct rw_semaphore *sem)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 0612323..07e85e5 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(resource_size(res), SECTION_SIZE);
- lock_device_hotplug();
mem_hotplug_begin();
arch_remove_memory(align_start, align_size);
mem_hotplug_done();
- unlock_device_hotplug();
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
pgmap_radix_release(res);
@@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (error)
goto err_pfn_remap;
- lock_device_hotplug();
mem_hotplug_begin();
error = arch_add_memory(nid, align_start, align_size, true);
mem_hotplug_done();
- unlock_device_hotplug();
if (error)
goto err_add_memory;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cd7cd48..54c5775 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy)
for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
+ memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->sg_policy = sg_policy;
- if (policy_is_shared(policy)) {
- sg_cpu->util = 0;
- sg_cpu->max = 0;
- sg_cpu->flags = SCHED_CPUFREQ_RT;
- sg_cpu->last_update = 0;
- sg_cpu->iowait_boost = 0;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_shared);
- } else {
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- sugov_update_single);
- }
+ sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+ cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+ policy_is_shared(policy) ?
+ sugov_update_shared :
+ sugov_update_single);
}
return 0;
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 99b2c33..a2ce590 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
*
* This function returns true if:
*
- * runtime / (deadline - t) > dl_runtime / dl_period ,
+ * runtime / (deadline - t) > dl_runtime / dl_deadline ,
*
* IOW we can't recycle current parameters.
*
- * Notice that the bandwidth check is done against the period. For
+ * Notice that the bandwidth check is done against the deadline. For
* task with deadline equal to period this is the same of using
- * dl_deadline instead of dl_period in the equation above.
+ * dl_period instead of dl_deadline in the equation above.
*/
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
struct sched_dl_entity *pi_se, u64 t)
@@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
* of anything below microseconds resolution is actually fiction
* (but still we want to give the user that illusion >;).
*/
- left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
right = ((dl_se->deadline - t) >> DL_SCALE) *
(pi_se->dl_runtime >> DL_SCALE);
@@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
}
}
+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
+{
+ return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
+}
+
/*
* If the entity depleted all its runtime, and if we want it to sleep
* while waiting for some new execution time to become available, we
- * set the bandwidth enforcement timer to the replenishment instant
+ * set the bandwidth replenishment timer to the replenishment instant
* and try to activate it.
*
* Notice that it is important for the caller to know if the timer
@@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p)
* that it is actually coming from rq->clock and not from
* hrtimer's time base reading.
*/
- act = ns_to_ktime(dl_se->deadline);
+ act = ns_to_ktime(dl_next_period(dl_se));
now = hrtimer_cb_get_time(timer);
delta = ktime_to_ns(now) - rq_clock(rq);
act = ktime_add_ns(act, delta);
@@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
lockdep_unpin_lock(&rq->lock, rf.cookie);
rq = dl_task_offline_migration(rq, p);
rf.cookie = lockdep_pin_lock(&rq->lock);
+ update_rq_clock(rq);
/*
* Now that the task has been migrated to the new RQ and we
@@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
timer->function = dl_task_timer;
}
+/*
+ * During the activation, CBS checks if it can reuse the current task's
+ * runtime and period. If the deadline of the task is in the past, CBS
+ * cannot use the runtime, and so it replenishes the task. This rule
+ * works fine for implicit deadline tasks (deadline == period), and the
+ * CBS was designed for implicit deadline tasks. However, a task with
+ * constrained deadline (deadline < period) might be awakened after the
+ * deadline, but before the next period. In this case, replenishing the
+ * task would allow it to run for runtime / deadline. As in this case
+ * deadline < period, CBS enables a task to run for more than the
+ * runtime / period. In a very loaded system, this can cause a domino
+ * effect, making other tasks miss their deadlines.
+ *
+ * To avoid this problem, in the activation of a constrained deadline
+ * task after the deadline but before the next period, throttle the
+ * task and set the replenishment timer to the beginning of the next period,
+ * unless it is boosted.
+ */
+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
+{
+ struct task_struct *p = dl_task_of(dl_se);
+ struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
+
+ if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+ dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
+ if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
+ return;
+ dl_se->dl_throttled = 1;
+ }
+}
+
static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
@@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
__dequeue_dl_entity(dl_se);
}
+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_deadline < dl_se->dl_period;
+}
+
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -948,6 +990,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
}
/*
+ * Check if a constrained deadline task was activated
+ * after the deadline but before the next period.
+ * If that is the case, the task will be throttled and
+ * the replenishment timer will be set to the next period.
+ */
+ if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+ dl_check_constrained_dl(&p->dl);
+
+ /*
* If p is throttled, we do nothing. In fact, if it exhausted
* its budget it needs a replenishment and, since it now is on
* its rq, the bandwidth timer callback (which clearly has not
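
To make the constrained-deadline window concrete, here is a tiny sketch with hypothetical task parameters (runtime 2, relative deadline 5, period 10, in arbitrary time units):

#include <stdio.h>

int main(void)
{
	/* hypothetical constrained task: runtime 2, deadline 5, period 10 */
	unsigned long long deadline = 15;   /* absolute deadline of this instance */
	unsigned long long dl_deadline = 5, dl_period = 10;
	unsigned long long next_period = deadline - dl_deadline + dl_period;

	/* dl_next_period() analogue: 15 - 5 + 10 = 20 */
	printf("next period starts at t = %llu\n", next_period);

	/*
	 * A wakeup at t = 17 satisfies deadline <= 17 < next_period, so the
	 * task is throttled and the replenishment timer is armed for t = 20
	 * instead of handing it a fresh budget early.
	 */
	return 0;
}
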
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 7296b73..f15fb2b 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void)
* If the folding window started, make sure we start writing in the
* next idle-delta.
*/
- if (!time_before(jiffies, calc_load_update))
+ if (!time_before(jiffies, READ_ONCE(calc_load_update)))
idx++;
return idx & 1;
@@ -202,8 +202,9 @@ void calc_load_exit_idle(void)
struct rq *this_rq = this_rq();
/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = READ_ONCE(calc_load_update);
if (time_before(jiffies, this_rq->calc_load_update))
return;
@@ -212,7 +213,6 @@ void calc_load_exit_idle(void)
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
@@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp,
*/
static void calc_global_nohz(void)
{
+ unsigned long sample_window;
long delta, active, n;
- if (!time_before(jiffies, calc_load_update + 10)) {
+ sample_window = READ_ONCE(calc_load_update);
+ if (!time_before(jiffies, sample_window + 10)) {
/*
* Catch-up, fold however many we are behind still
*/
- delta = jiffies - calc_load_update - 10;
+ delta = jiffies - sample_window - 10;
n = 1 + (delta / LOAD_FREQ);
active = atomic_long_read(&calc_load_tasks);
@@ -324,7 +326,7 @@ static void calc_global_nohz(void)
avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
- calc_load_update += n * LOAD_FREQ;
+ WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
}
/*
@@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { }
*/
void calc_global_load(unsigned long ticks)
{
+ unsigned long sample_window;
long active, delta;
- if (time_before(jiffies, calc_load_update + 10))
+ sample_window = READ_ONCE(calc_load_update);
+ if (time_before(jiffies, sample_window + 10))
return;
/*
@@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks)
avenrun[1] = calc_load(avenrun[1], EXP_5, active);
avenrun[2] = calc_load(avenrun[2], EXP_15, active);
- calc_load_update += LOAD_FREQ;
+ WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
/*
* In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
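
The loadavg change makes each reader take one snapshot of calc_load_update and advance the window relative to that snapshot. A simplified sketch of the pattern (plain comparison instead of time_before(), folding math omitted; the macros below are stand-ins for this sketch only):

/* kernel-style READ_ONCE/WRITE_ONCE stand-ins for this sketch */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

static unsigned long calc_load_update;	/* shared sample window */

/* simplified: no wraparound handling, no avenrun[] math */
static void calc_global_load_sketch(unsigned long now, unsigned long load_freq)
{
	unsigned long sample_window = READ_ONCE(calc_load_update);

	if (now < sample_window + 10)
		return;

	/* ... fold avenrun[] here ... */

	/* advance relative to the snapshot, not to a possibly newer value */
	WRITE_ONCE(calc_load_update, sample_window + load_freq);
}
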
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 295479b..6fa7208 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -125,9 +125,12 @@ void put_online_mems(void)
}
+/* Serializes write accesses to mem_hotplug.active_writer. */
+static DEFINE_MUTEX(memory_add_remove_lock);
+
void mem_hotplug_begin(void)
{
- assert_held_device_hotplug();
+ mutex_lock(&memory_add_remove_lock);
mem_hotplug.active_writer = current;
@@ -147,6 +150,7 @@ void mem_hotplug_done(void)
mem_hotplug.active_writer = NULL;
mutex_unlock(&mem_hotplug.lock);
memhp_lock_release();
+ mutex_unlock(&memory_add_remove_lock);
}
/* add this memory to iomem resource */
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 9b5bc86..b1ccb58 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry)
{
struct swap_slots_cache *cache;
- BUG_ON(!swap_slot_cache_initialized);
-
cache = &get_cpu_var(swp_slots);
if (use_swap_slot_cache && cache->slots_ret) {
spin_lock_irq(&cache->free_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0dd8022..0b05762 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1683,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
if (fatal_signal_pending(current)) {
area->nr_pages = i;
- goto fail;
+ goto fail_no_warn;
}
if (node == NUMA_NO_NODE)
@@ -1709,6 +1709,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure, allocated %ld of %ld bytes",
(area->nr_pages*PAGE_SIZE), area->size);
+fail_no_warn:
vfree(area->addr);
return NULL;
}
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8970a2f..f9492bc 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -667,6 +667,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
z3fold_page_unlock(zhdr);
spin_lock(&pool->lock);
if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+ spin_unlock(&pool->lock);
atomic64_dec(&pool->pages_nr);
return 0;
}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7c3d994e..71343d0 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
batadv_iv_ogm_schedule(hard_iface);
}
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default TQ difference threshold to 20 */
+ atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
static struct batadv_gw_node *
batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
@@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.del_if = batadv_iv_ogm_orig_del_if,
},
.gw = {
+ .init_sel_class = batadv_iv_init_sel_class,
.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
.is_eligible = batadv_iv_gw_is_eligible,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 0acd081..a36c8e7 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -668,6 +668,16 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
return ret;
}
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default throughput difference threshold to 5Mbps */
+ atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
char *buff, size_t count)
{
@@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.dump = batadv_v_orig_dump,
},
.gw = {
+ .init_sel_class = batadv_v_init_sel_class,
.store_sel_class = batadv_v_store_sel_class,
.show_sel_class = batadv_v_show_sel_class,
.get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
if (ret < 0)
return ret;
- /* set default throughput difference threshold to 5Mbps */
- atomic_set(&bat_priv->gw.sel_class, 50);
-
return 0;
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 11a23fd..8f964be 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -404,7 +404,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
* batadv_frag_create - create a fragment from skb
* @skb: skb to create fragment from
* @frag_head: header to use in new fragment
- * @mtu: size of new fragment
+ * @fragment_size: size of new fragment
*
* Split the passed skb into two fragments: A new one with size matching the
* passed mtu and the old one with the rest. The new skb contains data from the
@@ -414,11 +414,11 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
*/
static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
struct batadv_frag_packet *frag_head,
- unsigned int mtu)
+ unsigned int fragment_size)
{
struct sk_buff *skb_fragment;
unsigned int header_size = sizeof(*frag_head);
- unsigned int fragment_size = mtu - header_size;
+ unsigned int mtu = fragment_size + header_size;
skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
if (!skb_fragment)
@@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
struct sk_buff *skb_fragment;
unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
unsigned int header_size = sizeof(frag_header);
- unsigned int max_fragment_size, max_packet_size;
+ unsigned int max_fragment_size, num_fragments;
int ret;
/* To avoid merge and refragmentation at next-hops we never send
@@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb,
*/
mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
max_fragment_size = mtu - header_size;
- max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+ if (skb->len == 0 || max_fragment_size == 0)
+ return -EINVAL;
+
+ num_fragments = (skb->len - 1) / max_fragment_size + 1;
+ max_fragment_size = (skb->len - 1) / num_fragments + 1;
/* Don't even try to fragment, if we need more than 16 fragments */
- if (skb->len > max_packet_size) {
+ if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
ret = -EAGAIN;
goto free_skb;
}
@@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
goto put_primary_if;
}
- skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+ skb_fragment = batadv_frag_create(skb, &frag_header,
+ max_fragment_size);
if (!skb_fragment) {
ret = -ENOMEM;
goto put_primary_if;
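
The fragmentation change sizes fragments evenly rather than filling each one up to the MTU. With hypothetical numbers, a 1400 byte payload and a 1120 byte maximum fragment size:

#include <stdio.h>

int main(void)
{
	unsigned int len = 1400, max_fragment_size = 1120, num_fragments;

	num_fragments = (len - 1) / max_fragment_size + 1;	/* 2 */
	max_fragment_size = (len - 1) / num_fragments + 1;	/* 700 */

	/* two 700 byte fragments instead of 1120 + 280 */
	printf("%u fragments of at most %u bytes\n", num_fragments, max_fragment_size);
	return 0;
}
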
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 5db2e43..33940c5 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
*/
void batadv_gw_init(struct batadv_priv *bat_priv)
{
+ if (bat_priv->algo_ops->gw.init_sel_class)
+ bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+ else
+ atomic_set(&bat_priv->gw.sel_class, 1);
+
batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
NULL, BATADV_TVLV_GW, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5d099b2..d042c99 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
- atomic_set(&bat_priv->gw.sel_class, 20);
atomic_set(&bat_priv->gw.bandwidth_down, 100);
atomic_set(&bat_priv->gw.bandwidth_up, 20);
atomic_set(&bat_priv->orig_interval, 1000);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 66b25e4..246f21b 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops {
/**
* struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
* @store_sel_class: parse and stores a new GW selection class (optional)
* @show_sel_class: prints the current GW selection class (optional)
* @get_best_gw_node: select the best GW from the list of available nodes
@@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops {
* @dump: dump gateways to a netlink socket (optional)
*/
struct batadv_algo_gw_ops {
+ void (*init_sel_class)(struct batadv_priv *bat_priv);
ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
size_t count);
ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4f598dc..6e08b71 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
- WARN_ON_ONCE(!br_hash_lock_held(br));
+ lockdep_assert_held_once(&br->hash_lock);
rcu_read_lock();
fdb = fdb_find_rcu(head, addr, vid);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index fa87fbd..1f1e620 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
- unsigned int mtu_reserved;
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ unsigned int mtu, mtu_reserved;
mtu_reserved = nf_bridge_mtu_reduction(skb);
+ mtu = skb->dev->mtu;
- if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) {
+ if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
+ mtu = nf_bridge->frag_max_size;
+
+ if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
}
- nf_bridge = nf_bridge_info_get(skb);
-
/* This is wrong! We should preserve the original fragment
* boundaries by preserving frag_list rather than refragmenting.
*/
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2288fca..6136818 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid);
-static inline bool br_hash_lock_held(struct net_bridge *br)
-{
-#ifdef CONFIG_LOCKDEP
- return lockdep_is_held(&br->hash_lock);
-#else
- return true;
-#endif
-}
-
/* br_forward.c */
enum br_pkt_type {
BR_PKT_UNICAST,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 38dcf1e..f76bb33 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
+#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
@@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
{
struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
struct socket *sock;
+ unsigned int noio_flag;
int ret;
BUG_ON(con->sock);
+
+ /* sock_create_kern() allocates with GFP_KERNEL */
+ noio_flag = memalloc_noio_save();
ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
+ memalloc_noio_restore(noio_flag);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6ae5603..029a61a 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
return 0;
}
-static void update_classid(struct cgroup_subsys_state *css, void *v)
-{
- struct css_task_iter it;
- struct task_struct *p;
-
- css_task_iter_start(css, &it);
- while ((p = css_task_iter_next(&it))) {
- task_lock(p);
- iterate_fd(p->files, 0, update_classid_sock, v);
- task_unlock(p);
- }
- css_task_iter_end(&it);
-}
-
static void cgrp_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
+ struct task_struct *p;
- cgroup_taskset_first(tset, &css);
- update_classid(css,
- (void *)(unsigned long)css_cls_state(css)->classid);
+ cgroup_taskset_for_each(p, css, tset) {
+ task_lock(p);
+ iterate_fd(p->files, 0, update_classid_sock,
+ (void *)(unsigned long)css_cls_state(css)->classid);
+ task_unlock(p);
+ }
}
static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
u64 value)
{
struct cgroup_cls_state *cs = css_cls_state(css);
+ struct css_task_iter it;
+ struct task_struct *p;
cgroup_sk_alloc_disable();
cs->classid = (u32)value;
- update_classid(css, (void *)(unsigned long)cs->classid);
+ css_task_iter_start(css, &it);
+ while ((p = css_task_iter_next(&it))) {
+ task_lock(p);
+ iterate_fd(p->files, 0, update_classid_sock,
+ (void *)(unsigned long)cs->classid);
+ task_unlock(p);
+ }
+ css_task_iter_end(&it);
+
return 0;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cd4ba8c..9f78109 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb)
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
+static void skb_set_err_queue(struct sk_buff *skb)
+{
+ /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
+ * So, it is safe to (mis)use it to mark skbs on the error queue.
+ */
+ skb->pkt_type = PACKET_OUTGOING;
+ BUILD_BUG_ON(PACKET_OUTGOING == 0);
+}
+
/*
* Note: We dont mem charge error packets (no sk_forward_alloc changes)
*/
@@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_rmem_free;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ skb_set_err_queue(skb);
/* before exiting rcu section, make sure dst is refcounted */
skb_dst_force(skb);
@@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk);
static void __skb_complete_tx_timestamp(struct sk_buff *skb,
struct sock *sk,
- int tstype)
+ int tstype,
+ bool opt_stats)
{
struct sock_exterr_skb *serr;
int err;
+ BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
+
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
serr->ee.ee_info = tstype;
+ serr->opt_stats = opt_stats;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk->sk_protocol == IPPROTO_TCP &&
@@ -3833,7 +3847,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
*/
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
*skb_hwtstamps(skb) = *hwtstamps;
- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
sock_put(sk);
}
}
@@ -3844,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
struct sock *sk, int tstype)
{
struct sk_buff *skb;
- bool tsonly;
+ bool tsonly, opt_stats = false;
if (!sk)
return;
@@ -3857,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
#ifdef CONFIG_INET
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM)
+ sk->sk_type == SOCK_STREAM) {
skb = tcp_get_timestamping_opt_stats(sk);
- else
+ opt_stats = true;
+ } else
#endif
skb = alloc_skb(0, GFP_ATOMIC);
} else {
@@ -3878,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
else
skb->tstamp = ktime_get_real();
- __skb_complete_tx_timestamp(skb, sk, tstype);
+ __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
diff --git a/net/core/sock.c b/net/core/sock.c
index a96d5f7..2c4f574 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head)
pr_debug("%s: optmem leakage (%d bytes) detected\n",
__func__, atomic_read(&sk->sk_omem_alloc));
+ if (sk->sk_frag.page) {
+ put_page(sk->sk_frag.page);
+ sk->sk_frag.page = NULL;
+ }
+
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
@@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
is_charged = sk_filter_charge(newsk, filter);
if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+ /* We need to make sure that we don't uncharge the new
+ * socket if we couldn't charge it in the first place
+ * as otherwise we uncharge the parent's filter.
+ */
+ if (!is_charged)
+ RCU_INIT_POINTER(newsk->sk_filter, NULL);
sk_free_unlock_clone(newsk);
newsk = NULL;
goto out;
@@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk)
sk_refcnt_debug_release(sk);
- if (sk->sk_frag.page) {
- put_page(sk->sk_frag.page);
- sk->sk_frag.page = NULL;
- }
-
sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 42bfd08..8f2133f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb)
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+ if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+ skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index bbe7f72..b3cdeec 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg)
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);
+ rcu_read_lock();
spin_lock(&qp->q.lock);
if (qp->q.flags & INET_FRAG_COMPLETE)
@@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg)
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
if (!inet_frag_evicting(&qp->q)) {
- struct sk_buff *head = qp->q.fragments;
+ struct sk_buff *clone, *head = qp->q.fragments;
const struct iphdr *iph;
int err;
@@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg)
if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
goto out;
- rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
- goto out_rcu_unlock;
+ goto out;
+
/* skb has no dst, perform route lookup again */
iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (err)
- goto out_rcu_unlock;
+ goto out;
/* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (frag_expire_skip_icmp(qp->user) &&
(skb_rtable(head)->rt_type != RTN_LOCAL))
- goto out_rcu_unlock;
+ goto out;
+
+ clone = skb_clone(head, GFP_ATOMIC);
/* Send an ICMP "Fragment Reassembly Timeout" message. */
- icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
-out_rcu_unlock:
- rcu_read_unlock();
+ if (clone) {
+ spin_unlock(&qp->q.lock);
+ icmp_send(clone, ICMP_TIME_EXCEEDED,
+ ICMP_EXC_FRAGTIME, 0);
+ consume_skb(clone);
+ goto out_rcu_unlock;
+ }
}
out:
spin_unlock(&qp->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
ipq_put(qp);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index bc1486f..2e14ed1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
+
+ if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
+ return NF_ACCEPT;
+
return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
}
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index f8aad03..6f5e8d0 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
/* maniptype == SRC for postrouting. */
enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
- /* We never see fragments: conntrack defrags on pre-routing
- * and local-out, and nf_nat_out protects post-routing.
- */
- NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
ct = nf_ct_get(skb, &ctinfo);
/* Can't track? It's not due to stress, or conntrack would
* have dropped it. Hence it's the user's responsibilty to
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index a0ea8aa..f186772 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
}
regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt),
&range, nft_out(pkt));
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index 1650ed2..5120be1 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
- mr.range[0].min.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- mr.range[0].max.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ mr.range[0].min.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ mr.range[0].max.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index cf45555..1e319a5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 now = tcp_time_stamp, intv;
+ u32 now, intv;
u64 rate64;
bool slow;
u32 rate;
@@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_retrans = tp->retrans_out;
info->tcpi_fackets = tp->fackets_out;
+ now = tcp_time_stamp;
info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 39c393c..c431197 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5541,6 +5541,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
+ icsk->icsk_ack.lrcvtime = tcp_time_stamp;
if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5759,7 +5760,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7e16243..65c0f3d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
newtp->packets_out = 0;
newtp->retrans_out = 0;
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 6c5b5b1..4146536 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
range.flags = priv->flags;
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
}
regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range,
nft_out(pkt));
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index f5ac080..a27e424 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min],
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max],
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 35c58b6..9db14189 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3423,6 +3423,8 @@ static int rt6_fill_node(struct net *net,
}
else if (rt->rt6i_flags & RTF_LOCAL)
rtm->rtm_type = RTN_LOCAL;
+ else if (rt->rt6i_flags & RTF_ANYCAST)
+ rtm->rtm_type = RTN_ANYCAST;
else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4e4c401..e28082f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.hlimit = -1;
ipc6.tclass = -1;
ipc6.dontfrag = -1;
+ sockc.tsflags = sk->sk_tsflags;
/* destination address check */
if (sin6) {
@@ -1159,7 +1160,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_mark = sk->sk_mark;
fl6.flowi6_uid = sk->sk_uid;
- sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
opt = &opt_space;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 33211f9..6414079 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
+ unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
+ unsigned int alive;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event)
if (!rt)
continue;
+ alive = 0;
change_nexthops(rt) {
if (rtnl_dereference(nh->nh_dev) != dev)
- continue;
+ goto next;
+
switch (event) {
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
@@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event)
/* fall through */
case NETDEV_CHANGE:
nh->nh_flags |= RTNH_F_LINKDOWN;
- if (event != NETDEV_UNREGISTER)
- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
break;
}
if (event == NETDEV_UNREGISTER)
RCU_INIT_POINTER(nh->nh_dev, NULL);
+next:
+ if (!(nh->nh_flags & nh_flags))
+ alive++;
} endfor_nexthops(rt);
+
+ WRITE_ONCE(rt->rt_nhn_alive, alive);
}
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 071b97f..ffb78e5 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
seqcount_t nf_conntrack_generation __read_mostly;
-DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
+/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
+ * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
+ * alignment to enforce this.
+ */
+DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
static unsigned int nf_conntrack_hash_rnd __read_mostly;
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 31d3586..804e8a0 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb,
enum nf_nat_manip_type maniptype)
{
sctp_sctphdr_t *hdr;
+ int hdrsize = 8;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ /* This could be an inner header returned in icmp packet; in such
+ * cases we cannot update the checksum field since it is outside
+ * of the 8 bytes of transport layer headers we are guaranteed.
+ */
+ if (skb->len >= hdroff + sizeof(*hdr))
+ hdrsize = sizeof(*hdr);
+
+ if (!skb_make_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct sctphdr *)(skb->data + hdroff);
@@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb,
hdr->dest = tuple->dst.u.sctp.port;
}
+ if (hdrsize < sizeof(*hdr))
+ return true;
+
if (skb->ip_summed != CHECKSUM_PARTIAL) {
hdr->checksum = sctp_compute_cksum(skb, hdroff);
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e0ccfd..434c739 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_bind_check_setelem;
- iter.flush = false;
set->ops->walk(ctx, set, &iter);
if (iter.err < 0)
@@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
args.iter.count = 0;
args.iter.err = 0;
args.iter.fn = nf_tables_dump_setelem;
- args.iter.flush = false;
set->ops->walk(&ctx, set, &args.iter);
nla_nest_end(skb, nest);
@@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
struct nft_set_iter iter = {
.genmask = genmask,
.fn = nft_flush_set,
- .flush = true,
};
set->ops->walk(&ctx, set, &iter);
@@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_loop_check_setelem;
- iter.flush = false;
set->ops->walk(ctx, set, &iter);
if (iter.err < 0)
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bf548a7..0264258 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
switch (priv->key) {
case NFT_CT_DIRECTION:
- *dest = CTINFO2DIR(ctinfo);
+ nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return;
case NFT_CT_STATUS:
*dest = ct->status;
@@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
return;
}
case NFT_CT_L3PROTOCOL:
- *dest = nf_ct_l3num(ct);
+ nft_reg_store8(dest, nf_ct_l3num(ct));
return;
case NFT_CT_PROTOCOL:
- *dest = nf_ct_protonum(ct);
+ nft_reg_store8(dest, nf_ct_protonum(ct));
return;
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE: {
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+ u16 zoneid;
if (priv->dir < IP_CT_DIR_MAX)
- *dest = nf_ct_zone_id(zone, priv->dir);
+ zoneid = nf_ct_zone_id(zone, priv->dir);
else
- *dest = zone->id;
+ zoneid = zone->id;
+ nft_reg_store16(dest, zoneid);
return;
}
#endif
@@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
return;
case NFT_CT_PROTO_SRC:
- *dest = (__force __u16)tuple->src.u.all;
+ nft_reg_store16(dest, (__force u16)tuple->src.u.all);
return;
case NFT_CT_PROTO_DST:
- *dest = (__force __u16)tuple->dst.u.all;
+ nft_reg_store16(dest, (__force u16)tuple->dst.u.all);
return;
default:
break;
@@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
const struct nft_ct *priv = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
enum ip_conntrack_info ctinfo;
- u16 value = regs->data[priv->sreg];
+ u16 value = nft_reg_load16(&regs->data[priv->sreg]);
struct nf_conn *ct;
ct = nf_ct_get(skb, &ctinfo);
@@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
case IP_CT_DIR_REPLY:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto err1;
}
}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e1f5ca9..7b60e01 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = skb->len;
break;
case NFT_META_PROTOCOL:
- *dest = 0;
- *(__be16 *)dest = skb->protocol;
+ nft_reg_store16(dest, (__force u16)skb->protocol);
break;
case NFT_META_NFPROTO:
- *dest = nft_pf(pkt);
+ nft_reg_store8(dest, nft_pf(pkt));
break;
case NFT_META_L4PROTO:
if (!pkt->tprot_set)
goto err;
- *dest = pkt->tprot;
+ nft_reg_store8(dest, pkt->tprot);
break;
case NFT_META_PRIORITY:
*dest = skb->priority;
@@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr,
case NFT_META_IIFTYPE:
if (in == NULL)
goto err;
- *dest = 0;
- *(u16 *)dest = in->type;
+ nft_reg_store16(dest, in->type);
break;
case NFT_META_OIFTYPE:
if (out == NULL)
goto err;
- *dest = 0;
- *(u16 *)dest = out->type;
+ nft_reg_store16(dest, out->type);
break;
case NFT_META_SKUID:
sk = skb_to_full_sk(skb);
@@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr,
#endif
case NFT_META_PKTTYPE:
if (skb->pkt_type != PACKET_LOOPBACK) {
- *dest = skb->pkt_type;
+ nft_reg_store8(dest, skb->pkt_type);
break;
}
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
else
- *dest = PACKET_BROADCAST;
+ nft_reg_store8(dest, PACKET_BROADCAST);
break;
case NFPROTO_IPV6:
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
break;
case NFPROTO_NETDEV:
switch (skb->protocol) {
@@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr,
goto err;
if (ipv4_is_multicast(iph->daddr))
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
else
- *dest = PACKET_BROADCAST;
+ nft_reg_store8(dest, PACKET_BROADCAST);
break;
}
case htons(ETH_P_IPV6):
- *dest = PACKET_MULTICAST;
+ nft_reg_store8(dest, PACKET_MULTICAST);
break;
default:
WARN_ON_ONCE(1);
@@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr,
{
const struct nft_meta *meta = nft_expr_priv(expr);
struct sk_buff *skb = pkt->skb;
- u32 value = regs->data[meta->sreg];
+ u32 *sreg = &regs->data[meta->sreg];
+ u32 value = *sreg;
+ u8 pkt_type;
switch (meta->key) {
case NFT_META_MARK:
@@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr,
skb->priority = value;
break;
case NFT_META_PKTTYPE:
- if (skb->pkt_type != value &&
- skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type))
- skb->pkt_type = value;
+ pkt_type = nft_reg_load8(sreg);
+
+ if (skb->pkt_type != pkt_type &&
+ skb_pkt_type_ok(pkt_type) &&
+ skb_pkt_type_ok(skb->pkt_type))
+ skb->pkt_type = pkt_type;
break;
case NFT_META_NFTRACE:
skb->nf_trace = !!value;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 19a7bf3..439e0bd 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
}
if (priv->sreg_proto_min) {
- range.min_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_min];
- range.max_proto.all =
- *(__be16 *)&regs->data[priv->sreg_proto_max];
+ range.min_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_min]);
+ range.max_proto.all = (__force __be16)nft_reg_load16(
+ &regs->data[priv->sreg_proto_max]);
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 152d226..8ebbc29 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -15,6 +15,11 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+struct nft_bitmap_elem {
+ struct list_head head;
+ struct nft_set_ext ext;
+};
+
/* This bitmap uses two bits to represent one element. These two bits determine
* the element state in the current and the future generation.
*
@@ -41,13 +46,22 @@
* restore its previous state.
*/
struct nft_bitmap {
- u16 bitmap_size;
- u8 bitmap[];
+ struct list_head list;
+ u16 bitmap_size;
+ u8 bitmap[];
};
-static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off)
+static inline void nft_bitmap_location(const struct nft_set *set,
+ const void *key,
+ u32 *idx, u32 *off)
{
- u32 k = (key << 1);
+ u32 k;
+
+ if (set->klen == 2)
+ k = *(u16 *)key;
+ else
+ k = *(u8 *)key;
+ k <<= 1;
*idx = k / BITS_PER_BYTE;
*off = k % BITS_PER_BYTE;
@@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
u8 genmask = nft_genmask_cur(net);
u32 idx, off;
- nft_bitmap_location(*key, &idx, &off);
+ nft_bitmap_location(set, key, &idx, &off);
return nft_bitmap_active(priv->bitmap, idx, off, genmask);
}
+static struct nft_bitmap_elem *
+nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this,
+ u8 genmask)
+{
+ const struct nft_bitmap *priv = nft_set_priv(set);
+ struct nft_bitmap_elem *be;
+
+ list_for_each_entry_rcu(be, &priv->list, head) {
+ if (memcmp(nft_set_ext_key(&be->ext),
+ nft_set_ext_key(&this->ext), set->klen) ||
+ !nft_set_elem_active(&be->ext, genmask))
+ continue;
+
+ return be;
+ }
+ return NULL;
+}
+
static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem,
- struct nft_set_ext **_ext)
+ struct nft_set_ext **ext)
{
struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext *ext = elem->priv;
+ struct nft_bitmap_elem *new = elem->priv, *be;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
- if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
+ be = nft_bitmap_elem_find(set, new, genmask);
+ if (be) {
+ *ext = &be->ext;
return -EEXIST;
+ }
+ nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off);
/* Enter 01 state. */
priv->bitmap[idx] |= (genmask << off);
+ list_add_tail_rcu(&new->head, &priv->list);
return 0;
}
@@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext *ext = elem->priv;
+ struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 00 state. */
priv->bitmap[idx] &= ~(genmask << off);
+ list_del_rcu(&be->head);
}
static void nft_bitmap_activate(const struct net *net,
@@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext *ext = elem->priv;
+ struct nft_bitmap_elem *be = elem->priv;
u8 genmask = nft_genmask_next(net);
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 11 state. */
priv->bitmap[idx] |= (genmask << off);
+ nft_set_elem_change_active(net, set, &be->ext);
}
static bool nft_bitmap_flush(const struct net *net,
- const struct nft_set *set, void *ext)
+ const struct nft_set *set, void *_be)
{
struct nft_bitmap *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
+ struct nft_bitmap_elem *be = _be;
u32 idx, off;
- nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+ nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off);
/* Enter 10 state, similar to deactivation. */
priv->bitmap[idx] &= ~(genmask << off);
+ nft_set_elem_change_active(net, set, &be->ext);
return true;
}
-static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
- const struct nft_set_elem *elem)
-{
- struct nft_set_ext_tmpl tmpl;
- struct nft_set_ext *ext;
-
- nft_set_ext_prepare(&tmpl);
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
-
- ext = kzalloc(tmpl.len, GFP_KERNEL);
- if (!ext)
- return NULL;
-
- nft_set_ext_init(ext, &tmpl);
- memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
-
- return ext;
-}
-
static void *nft_bitmap_deactivate(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_bitmap *priv = nft_set_priv(set);
+ struct nft_bitmap_elem *this = elem->priv, *be;
u8 genmask = nft_genmask_next(net);
- struct nft_set_ext *ext;
- u32 idx, off, key = 0;
+ u32 idx, off;
- memcpy(&key, elem->key.val.data, set->klen);
- nft_bitmap_location(key, &idx, &off);
+ nft_bitmap_location(set, elem->key.val.data, &idx, &off);
- if (!nft_bitmap_active(priv->bitmap, idx, off, genmask))
- return NULL;
-
- /* We have no real set extension since this is a bitmap, allocate this
- * dummy object that is released from the commit/abort path.
- */
- ext = nft_bitmap_ext_alloc(set, elem);
- if (!ext)
+ be = nft_bitmap_elem_find(set, this, genmask);
+ if (!be)
return NULL;
/* Enter 10 state. */
priv->bitmap[idx] &= ~(genmask << off);
+ nft_set_elem_change_active(net, set, &be->ext);
- return ext;
+ return be;
}
static void nft_bitmap_walk(const struct nft_ctx *ctx,
@@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
struct nft_set_iter *iter)
{
const struct nft_bitmap *priv = nft_set_priv(set);
- struct nft_set_ext_tmpl tmpl;
+ struct nft_bitmap_elem *be;
struct nft_set_elem elem;
- struct nft_set_ext *ext;
- int idx, off;
- u16 key;
- nft_set_ext_prepare(&tmpl);
- nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+ list_for_each_entry_rcu(be, &priv->list, head) {
+ if (iter->count < iter->skip)
+ goto cont;
+ if (!nft_set_elem_active(&be->ext, iter->genmask))
+ goto cont;
- for (idx = 0; idx < priv->bitmap_size; idx++) {
- for (off = 0; off < BITS_PER_BYTE; off += 2) {
- if (iter->count < iter->skip)
- goto cont;
+ elem.priv = be;
- if (!nft_bitmap_active(priv->bitmap, idx, off,
- iter->genmask))
- goto cont;
+ iter->err = iter->fn(ctx, set, iter, &elem);
- ext = kzalloc(tmpl.len, GFP_KERNEL);
- if (!ext) {
- iter->err = -ENOMEM;
- return;
- }
- nft_set_ext_init(ext, &tmpl);
- key = ((idx * BITS_PER_BYTE) + off) >> 1;
- memcpy(nft_set_ext_key(ext), &key, set->klen);
-
- elem.priv = ext;
- iter->err = iter->fn(ctx, set, iter, &elem);
-
- /* On set flush, this dummy extension object is released
- * from the commit/abort path.
- */
- if (!iter->flush)
- kfree(ext);
-
- if (iter->err < 0)
- return;
+ if (iter->err < 0)
+ return;
cont:
- iter->count++;
- }
+ iter->count++;
}
}
@@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set,
{
struct nft_bitmap *priv = nft_set_priv(set);
+ INIT_LIST_HEAD(&priv->list);
priv->bitmap_size = nft_bitmap_size(set->klen);
return 0;
@@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
static struct nft_set_ops nft_bitmap_ops __read_mostly = {
.privsize = nft_bitmap_privsize,
+ .elemsize = offsetof(struct nft_bitmap_elem, ext),
.estimate = nft_bitmap_estimate,
.init = nft_bitmap_init,
.destroy = nft_bitmap_destroy,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7b73c7c..596eaff 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
+static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
+
+static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
+ "nlk_cb_mutex-ROUTE",
+ "nlk_cb_mutex-1",
+ "nlk_cb_mutex-USERSOCK",
+ "nlk_cb_mutex-FIREWALL",
+ "nlk_cb_mutex-SOCK_DIAG",
+ "nlk_cb_mutex-NFLOG",
+ "nlk_cb_mutex-XFRM",
+ "nlk_cb_mutex-SELINUX",
+ "nlk_cb_mutex-ISCSI",
+ "nlk_cb_mutex-AUDIT",
+ "nlk_cb_mutex-FIB_LOOKUP",
+ "nlk_cb_mutex-CONNECTOR",
+ "nlk_cb_mutex-NETFILTER",
+ "nlk_cb_mutex-IP6_FW",
+ "nlk_cb_mutex-DNRTMSG",
+ "nlk_cb_mutex-KOBJECT_UEVENT",
+ "nlk_cb_mutex-GENERIC",
+ "nlk_cb_mutex-17",
+ "nlk_cb_mutex-SCSITRANSPORT",
+ "nlk_cb_mutex-ECRYPTFS",
+ "nlk_cb_mutex-RDMA",
+ "nlk_cb_mutex-CRYPTO",
+ "nlk_cb_mutex-SMC",
+ "nlk_cb_mutex-23",
+ "nlk_cb_mutex-24",
+ "nlk_cb_mutex-25",
+ "nlk_cb_mutex-26",
+ "nlk_cb_mutex-27",
+ "nlk_cb_mutex-28",
+ "nlk_cb_mutex-29",
+ "nlk_cb_mutex-30",
+ "nlk_cb_mutex-31",
+ "nlk_cb_mutex-MAX_LINKS"
+};
+
static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
@@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
} else {
nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex);
+ lockdep_set_class_and_name(nlk->cb_mutex,
+ nlk_cb_mutex_keys + protocol,
+ nlk_cb_mutex_key_strings[protocol]);
}
init_waitqueue_head(&nlk->wait);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index fb6e10f..92e0981 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- skb, CTRL_CMD_NEWFAMILY) < 0)
+ skb, CTRL_CMD_NEWFAMILY) < 0) {
+ n--;
break;
+ }
}
cb->args[0] = n;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 6f5fa50..1105a83 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
ipv4 = true;
break;
case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
- SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
nla_get_in6_addr(a), is_mask);
ipv6 = true;
break;
@@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
tun_flags |= TUNNEL_VXLAN_OPT;
opts_type = type;
break;
+ case OVS_TUNNEL_KEY_ATTR_PAD:
+ break;
default:
OVS_NLERR(log, "Unknown IP tunnel attribute %d",
type);
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 3f9d8d7..b099b64 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
rxrpc_conn_retransmit_call(conn, skb);
return 0;
+ case RXRPC_PACKET_TYPE_BUSY:
+ /* Just ignore BUSY packets for now. */
+ return 0;
+
case RXRPC_PACKET_TYPE_ABORT:
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
&wtmp, sizeof(wtmp)) < 0)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 802ac7c..5334e30 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);
if (p->set_tc_index) {
+ int wlen = skb_network_offset(skb);
+
switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
- if (skb_cow_head(skb, sizeof(struct iphdr)))
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;
skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
@@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
break;
case htons(ETH_P_IPV6):
- if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;
skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 2a6835b..0439a1a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
- int i;
sctp_paramhdr_t *p;
- int err;
+ int i;
/* Retrieve the SCTP per socket area. */
sp = sctp_sk((struct sock *)sk);
@@ -264,8 +263,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
- err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
- if (err)
+ if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
goto fail_init;
asoc->active_key_id = ep->active_key_id;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 71ce6b9..1224421 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -546,7 +546,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
struct sctp_association *asoc = tp->asoc;
struct sctp_chunk *chunk, *tmp;
int pkt_count, gso = 0;
- int confirm;
struct dst_entry *dst;
struct sk_buff *head;
struct sctphdr *sh;
@@ -625,13 +624,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
asoc->peer.last_sent_to = tp;
}
head->ignore_df = packet->ipfragok;
- confirm = tp->dst_pending_confirm;
- if (confirm)
+ if (tp->dst_pending_confirm)
skb_set_dst_pending_confirm(head, 1);
/* neighbour should be confirmed on successful transmission or
* positive error
*/
- if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
+ if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
+ tp->dst_pending_confirm)
tp->dst_pending_confirm = 0;
out:
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index db352e5..025ccff 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
}
static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
- struct sctp_sndrcvinfo *sinfo,
- struct list_head *queue, int msg_len)
+ struct sctp_sndrcvinfo *sinfo, int msg_len)
{
+ struct sctp_outq *q = &asoc->outqueue;
struct sctp_chunk *chk, *temp;
- list_for_each_entry_safe(chk, temp, queue, list) {
+ list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
continue;
list_del_init(&chk->list);
+ q->out_qlen -= chk->skb->len;
asoc->sent_cnt_removable--;
asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
@@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
return;
}
- sctp_prsctp_prune_unsent(asoc, sinfo,
- &asoc->outqueue.out_chunk_list,
- msg_len);
+ sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
}
/* Mark all the eligible packets on a transport for retransmission. */
diff --git a/net/socket.c b/net/socket.c
index e034fe4..985ef06 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
}
EXPORT_SYMBOL(kernel_sendmsg);
+static bool skb_is_err_queue(const struct sk_buff *skb)
+{
+ /* pkt_type of skbs enqueued on the error queue are set to
+ * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
+ * in recvmsg, since skbs received on a local socket will never
+ * have a pkt_type of PACKET_OUTGOING.
+ */
+ return skb->pkt_type == PACKET_OUTGOING;
+}
+
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
@@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET,
SCM_TIMESTAMPING, sizeof(tss), &tss);
- if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
+ if (skb_is_err_queue(skb) && skb->len &&
+ SKB_EXT_ERR(skb)->opt_stats)
put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
skb->len, skb->data);
}
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 81cd31a..3b332b3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
struct ib_cq *sendcq, *recvcq;
int rc;
- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+ RPCRDMA_MAX_SEND_SGES);
if (max_sge < RPCRDMA_MIN_SEND_SGES) {
pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
return -ENOMEM;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 9d94e65..271cd66 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
+ struct tipc_subscriber *subscriber = sub->subscriber;
+
+ spin_lock_bh(&subscriber->lock);
+ tipc_nametbl_unsubscribe(sub);
+ spin_unlock_bh(&subscriber->lock);
/* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
@@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref)
struct tipc_subscriber *subscriber = sub->subscriber;
spin_lock_bh(&subscriber->lock);
- tipc_nametbl_unsubscribe(sub);
list_del(&sub->subscrp_list);
atomic_dec(&tn->subscription_count);
spin_unlock_bh(&subscriber->lock);
@@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
continue;
+ tipc_nametbl_unsubscribe(sub);
tipc_subscrp_get(sub);
spin_unlock_bh(&subscriber->lock);
tipc_subscrp_delete(sub);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d485..c36757e7 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
if (s) {
struct unix_sock *u = unix_sk(s);
+ BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
}
list_del(&cursor);
+ /* Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
+ */
+ skb_queue_head_init(&hitlist);
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, inc_inflight, &hitlist);
+
/* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
@@ -350,14 +359,6 @@ void unix_gc(void)
list_move_tail(&u->link, &gc_inflight_list);
}
- /* Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, inc_inflight, &hitlist);
-
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9f770f3..6f7f675 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = {
.sendpage = sock_no_sendpage,
};
+static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ if (!transport->cancel_pkt)
+ return -EOPNOTSUPP;
+
+ return transport->cancel_pkt(vsk);
+}
+
static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
+ int cancel = 0;
vsk = container_of(work, struct vsock_sock, dwork.work);
sk = sk_vsock(vsk);
@@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = SS_UNCONNECTED;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
+ cancel = 1;
}
release_sock(sk);
+ if (cancel)
+ vsock_transport_cancel_pkt(vsk);
sock_put(sk);
}
@@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
err = sock_intr_errno(timeout);
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
sk->sk_state = SS_UNCONNECTED;
sock->state = SS_UNCONNECTED;
+ vsock_transport_cancel_pkt(vsk);
goto out_wait;
}
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 9d24c0e..68675a1 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return len;
}
+static int
+virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+ struct virtio_vsock *vsock;
+ struct virtio_vsock_pkt *pkt, *n;
+ int cnt = 0;
+ LIST_HEAD(freeme);
+
+ vsock = virtio_vsock_get();
+ if (!vsock) {
+ return -ENODEV;
+ }
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+ if (pkt->vsk != vsk)
+ continue;
+ list_move(&pkt->list, &freeme);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ list_for_each_entry_safe(pkt, n, &freeme, list) {
+ if (pkt->reply)
+ cnt++;
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+
+ if (cnt) {
+ struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+ int new_cnt;
+
+ new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+ if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+ new_cnt < virtqueue_get_vring_size(rx_vq))
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+ }
+
+ return 0;
+}
+
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
.release = virtio_transport_release,
.connect = virtio_transport_connect,
.shutdown = virtio_transport_shutdown,
+ .cancel_pkt = virtio_transport_cancel_pkt,
.dgram_bind = virtio_transport_dgram_bind,
.dgram_dequeue = virtio_transport_dgram_dequeue,
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 8d592a4..af087b4 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
pkt->len = len;
pkt->hdr.len = cpu_to_le32(len);
pkt->reply = info->reply;
+ pkt->vsk = info->vsk;
if (info->msg && len > 0) {
pkt->buf = kmalloc(len, GFP_KERNEL);
@@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
.type = type,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk)
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_REQUEST,
.type = VIRTIO_VSOCK_TYPE_STREAM,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
(mode & SEND_SHUTDOWN ?
VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk,
.type = VIRTIO_VSOCK_TYPE_STREAM,
.msg = msg,
.pkt_len = len,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
@@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
.op = VIRTIO_VSOCK_OP_RST,
.type = VIRTIO_VSOCK_TYPE_STREAM,
.reply = !!pkt,
+ .vsk = vsk,
};
/* Send RST only if the original pkt is not a RST pkt */
@@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
.remote_port = le32_to_cpu(pkt->hdr.src_port),
.reply = true,
+ .vsk = vsk,
};
return virtio_transport_send_pkt_info(vsk, &info);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d7f8be4..2312dc2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
{
int err;
- rtnl_lock();
-
if (!cb->args[0]) {
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
genl_family_attrbuf(&nl80211_fam),
nl80211_fam.maxattr, nl80211_policy);
if (err)
- goto out_unlock;
+ return err;
*wdev = __cfg80211_wdev_from_attrs(
sock_net(skb->sk),
genl_family_attrbuf(&nl80211_fam));
- if (IS_ERR(*wdev)) {
- err = PTR_ERR(*wdev);
- goto out_unlock;
- }
+ if (IS_ERR(*wdev))
+ return PTR_ERR(*wdev);
*rdev = wiphy_to_rdev((*wdev)->wiphy);
/* 0 is the first index - add 1 to parse only once */
cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
- if (!wiphy) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!wiphy)
+ return -ENODEV;
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
@@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
}
}
- if (!*wdev) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!*wdev)
+ return -ENODEV;
}
return 0;
- out_unlock:
- rtnl_unlock();
- return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
- rtnl_unlock();
}
/* IE validation */
@@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
int filter_wiphy = -1;
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
+ int ret;
rtnl_lock();
if (!cb->args[2]) {
struct nl80211_dump_wiphy_state state = {
.filter_wiphy = -1,
};
- int ret;
ret = nl80211_dump_wiphy_parse(skb, cb, &state);
if (ret)
- return ret;
+ goto out_unlock;
filter_wiphy = state.filter_wiphy;
@@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
wp_idx++;
}
out:
- rtnl_unlock();
-
cb->args[0] = wp_idx;
cb->args[1] = if_idx;
- return skb->len;
+ ret = skb->len;
+ out_unlock:
+ rtnl_unlock();
+
+ return ret;
}
static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
int sta_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!wdev->netdev) {
err = -EINVAL;
@@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
cb->args[2] = sta_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
int path_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!rdev->ops->dump_mpath) {
err = -EOPNOTSUPP;
@@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
cb->args[2] = path_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
int path_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!rdev->ops->dump_mpp) {
err = -EOPNOTSUPP;
@@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
cb->args[2] = path_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
int start = cb->args[2], idx = 0;
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
- if (err)
+ if (err) {
+ rtnl_unlock();
return err;
+ }
wdev_lock(wdev);
spin_lock_bh(&rdev->bss_lock);
@@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
wdev_unlock(wdev);
cb->args[2] = idx;
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return skb->len;
}
@@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
int res;
bool radio_stats;
+ rtnl_lock();
res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (res)
- return res;
+ goto out_err;
/* prepare_wdev_dump parsed the attributes */
radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[2] = survey_idx;
res = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return res;
}
@@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
void *data = NULL;
unsigned int data_len = 0;
- rtnl_lock();
-
if (cb->args[0]) {
/* subtract the 1 again here */
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
- if (!wiphy) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!wiphy)
+ return -ENODEV;
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
@@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
attrbuf, nl80211_fam.maxattr, nl80211_policy);
if (err)
- goto out_unlock;
+ return err;
if (!attrbuf[NL80211_ATTR_VENDOR_ID] ||
- !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
- err = -EINVAL;
- goto out_unlock;
- }
+ !attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+ return -EINVAL;
*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
if (IS_ERR(*wdev))
*wdev = NULL;
*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
- if (IS_ERR(*rdev)) {
- err = PTR_ERR(*rdev);
- goto out_unlock;
- }
+ if (IS_ERR(*rdev))
+ return PTR_ERR(*rdev);
vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]);
subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
continue;
- if (!vcmd->dumpit) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
+ if (!vcmd->dumpit)
+ return -EOPNOTSUPP;
vcmd_idx = i;
break;
}
- if (vcmd_idx < 0) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
+ if (vcmd_idx < 0)
+ return -EOPNOTSUPP;
if (attrbuf[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
/* keep rtnl locked in successful case */
return 0;
- out_unlock:
- rtnl_unlock();
- return err;
}
static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
int err;
struct nlattr *vendor_data;
+ rtnl_lock();
err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out;
vcmd_idx = cb->args[2];
data = (void *)cb->args[3];
@@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV)) {
- if (!wdev)
- return -EINVAL;
+ if (!wdev) {
+ err = -EINVAL;
+ goto out;
+ }
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
- !wdev->netdev)
- return -EINVAL;
+ !wdev->netdev) {
+ err = -EINVAL;
+ goto out;
+ }
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
- if (!wdev_running(wdev))
- return -ENETDOWN;
+ if (!wdev_running(wdev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
}
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 4c93520..f3b1d7f5 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1832,6 +1832,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
info->output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
/* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
snd_seq_pool_done(client->pool);
}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 448efd4..33980d1 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
return;
*fifo = NULL;
+ if (f->pool)
+ snd_seq_pool_mark_closing(f->pool);
+
snd_seq_fifo_clear(f);
/* wake up clients if any */
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 1a1acf3..d4c61ec 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -415,6 +415,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
return 0;
}
+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+ unsigned long flags;
+
+ if (snd_BUG_ON(!pool))
+ return;
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->closing = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
@@ -425,10 +437,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
return -EINVAL;
/* wait for closing all threads */
- spin_lock_irqsave(&pool->lock, flags);
- pool->closing = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
-
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);
@@ -485,6 +493,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool)
*ppool = NULL;
if (pool == NULL)
return 0;
+ snd_seq_pool_mark_closing(pool);
snd_seq_pool_done(pool);
kfree(pool);
return 0;
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec77..32f959c 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
int snd_seq_pool_init(struct snd_seq_pool *pool);
/* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
int snd_seq_pool_done(struct snd_seq_pool *pool);
/* create pool */
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ab4cdab..79edd88 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw)
return err;
/* Set DMA transfer mask */
- if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
} else {
dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c15c51b..69266b8 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -261,6 +261,7 @@ enum {
CXT_FIXUP_HP_530,
CXT_FIXUP_CAP_MIX_AMP_5047,
CXT_FIXUP_MUTE_LED_EAPD,
+ CXT_FIXUP_HP_DOCK,
CXT_FIXUP_HP_SPECTRE,
CXT_FIXUP_HP_GATE_MIC,
};
@@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_mute_led_eapd,
},
+ [CXT_FIXUP_HP_DOCK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x16, 0x21011020 }, /* line-out */
+ { 0x18, 0x2181103f }, /* line-in */
+ { }
+ }
+ },
[CXT_FIXUP_HP_SPECTRE] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
+ SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -871,6 +881,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
{ .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
{ .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
+ { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" },
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4e11222..7f98989 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4847,6 +4847,7 @@ enum {
ALC286_FIXUP_HP_GPIO_LED,
ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
ALC280_FIXUP_HP_DOCK_PINS,
+ ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
ALC280_FIXUP_HP_9480M,
ALC288_FIXUP_DELL_HEADSET_MODE,
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -5388,6 +5389,16 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC280_FIXUP_HP_GPIO4
},
+ [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x21011020 }, /* line-out */
+ { 0x18, 0x2181103f }, /* line-in */
+ { },
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
+ },
[ALC280_FIXUP_HP_9480M] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc280_fixup_hp_9480m,
@@ -5647,7 +5658,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
- SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -5816,6 +5827,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+ {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
@@ -6090,6 +6102,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC295_STANDARD_PINS,
{0x17, 0x21014040},
{0x18, 0x21a19050}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x17, 0x90170110}),
diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig
index 84c8f8fc..8adf4d1 100644
--- a/sound/x86/Kconfig
+++ b/sound/x86/Kconfig
@@ -1,6 +1,7 @@
menuconfig SND_X86
- tristate "X86 sound devices"
+ bool "X86 sound devices"
depends on X86
+ default y
---help---
X86 sound devices that don't fall under SoC or PCI categories
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index c24b3e3a..77a28a2 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -7,6 +7,7 @@
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
#include <asm/types.h>
+#include <asm/posix_types.h>
struct page;
struct kmem_cache;
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
new file mode 100644
index 0000000..813afd6
--- /dev/null
+++ b/tools/include/uapi/linux/fcntl.h
@@ -0,0 +1,72 @@
+#ifndef _UAPI_LINUX_FCNTL_H
+#define _UAPI_LINUX_FCNTL_H
+
+#include <asm/fcntl.h>
+
+#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
+#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
+
+/*
+ * Cancel a blocking posix lock; internal use only until we expose an
+ * asynchronous lock api to userspace:
+ */
+#define F_CANCELLK (F_LINUX_SPECIFIC_BASE + 5)
+
+/* Create a file descriptor with FD_CLOEXEC set. */
+#define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6)
+
+/*
+ * Request notifications on a directory.
+ * See below for events that may be notified.
+ */
+#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2)
+
+/*
+ * Set and get of pipe page size array
+ */
+#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
+#define F_GETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 8)
+
+/*
+ * Set/Get seals
+ */
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+
+/*
+ * Types of seals
+ */
+#define F_SEAL_SEAL 0x0001 /* prevent further seals from being set */
+#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
+#define F_SEAL_GROW 0x0004 /* prevent file from growing */
+#define F_SEAL_WRITE 0x0008 /* prevent writes */
+/* (1U << 31) is reserved for signed error codes */
+
+/*
+ * Types of directory notifications that may be requested.
+ */
+#define DN_ACCESS 0x00000001 /* File accessed */
+#define DN_MODIFY 0x00000002 /* File modified */
+#define DN_CREATE 0x00000004 /* File created */
+#define DN_DELETE 0x00000008 /* File removed */
+#define DN_RENAME 0x00000010 /* File renamed */
+#define DN_ATTRIB 0x00000020 /* File changed attributes */
+#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+
+#define AT_FDCWD -100 /* Special value used to indicate
+ openat should use the current
+ working directory. */
+#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
+#define AT_REMOVEDIR 0x200 /* Remove directory instead of
+ unlinking file. */
+#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
+#define AT_NO_AUTOMOUNT 0x800 /* Suppress terminal automount traversal */
+#define AT_EMPTY_PATH 0x1000 /* Allow empty relative pathname */
+
+#define AT_STATX_SYNC_TYPE 0x6000 /* Type of synchronisation required from statx() */
+#define AT_STATX_SYNC_AS_STAT 0x0000 /* - Do whatever stat() does */
+#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
+#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
+
+
+#endif /* _UAPI_LINUX_FCNTL_H */
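
For reference, a minimal user-space sketch exercising the pipe-size fcntls
defined above (hypothetical test program, not part of the patch; assumes a
glibc that exposes F_SETPIPE_SZ/F_GETPIPE_SZ when _GNU_SOURCE is defined):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        long sz;

        if (pipe(fds))
                return 1;

        /* Ask for a 1 MiB pipe buffer; the kernel rounds up as needed. */
        if (fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024) < 0)
                perror("F_SETPIPE_SZ");

        sz = fcntl(fds[1], F_GETPIPE_SZ);
        printf("pipe buffer is now %ld bytes\n", sz);

        close(fds[0]);
        close(fds[1]);
        return 0;
}
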
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
new file mode 100644
index 0000000..51a6b86
--- /dev/null
+++ b/tools/include/uapi/linux/stat.h
@@ -0,0 +1,176 @@
+#ifndef _UAPI_LINUX_STAT_H
+#define _UAPI_LINUX_STAT_H
+
+#include <linux/types.h>
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
+
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+
+#endif
+
+/*
+ * Timestamp structure for the timestamps in struct statx.
+ *
+ * tv_sec holds the number of seconds before (negative) or after (positive)
+ * 00:00:00 1st January 1970 UTC.
+ *
+ * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
+ * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
+ *
+ * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
+ * either be both positive or both negative.
+ *
+ * __reserved is held in case we need a yet finer resolution.
+ */
+struct statx_timestamp {
+ __s64 tv_sec;
+ __s32 tv_nsec;
+ __s32 __reserved;
+};
+
+/*
+ * Structures for the extended file attribute retrieval system call
+ * (statx()).
+ *
+ * The caller passes a mask of what they're specifically interested in as a
+ * parameter to statx(). What statx() actually got will be indicated in
+ * stx_mask upon return.
+ *
+ * For each bit in the mask argument:
+ *
+ * - if the datum is not supported:
+ *
+ * - the bit will be cleared, and
+ *
+ * - the datum will be set to an appropriate fabricated value if one is
+ * available (eg. CIFS can take a default uid and gid), otherwise
+ *
+ * - the field will be cleared;
+ *
+ * - otherwise, if explicitly requested:
+ *
+ * - the datum will be synchronised to the server if AT_STATX_FORCE_SYNC is
+ * set or if the datum is considered out of date, and
+ *
+ * - the field will be filled in and the bit will be set;
+ *
+ * - otherwise, if not requested, but available in approximate form without any
+ * effort, it will be filled in anyway, and the bit will be set upon return
+ * (it might not be up to date, however, and no attempt will be made to
+ * synchronise the internal state first);
+ *
+ * - otherwise the field and the bit will be cleared before returning.
+ *
+ * Items in STATX_BASIC_STATS may be marked unavailable on return, but they
+ * will have values installed for compatibility purposes so that stat() and
+ * co. can be emulated in userspace.
+ */
+struct statx {
+ /* 0x00 */
+ __u32 stx_mask; /* What results were written [uncond] */
+ __u32 stx_blksize; /* Preferred general I/O size [uncond] */
+ __u64 stx_attributes; /* Flags conveying information about the file [uncond] */
+ /* 0x10 */
+ __u32 stx_nlink; /* Number of hard links */
+ __u32 stx_uid; /* User ID of owner */
+ __u32 stx_gid; /* Group ID of owner */
+ __u16 stx_mode; /* File mode */
+ __u16 __spare0[1];
+ /* 0x20 */
+ __u64 stx_ino; /* Inode number */
+ __u64 stx_size; /* File size */
+ __u64 stx_blocks; /* Number of 512-byte blocks allocated */
+ __u64 __spare1[1];
+ /* 0x40 */
+ struct statx_timestamp stx_atime; /* Last access time */
+ struct statx_timestamp stx_btime; /* File creation time */
+ struct statx_timestamp stx_ctime; /* Last attribute change time */
+ struct statx_timestamp stx_mtime; /* Last data modification time */
+ /* 0x80 */
+ __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
+ __u32 stx_rdev_minor;
+ __u32 stx_dev_major; /* ID of device containing file [uncond] */
+ __u32 stx_dev_minor;
+ /* 0x90 */
+ __u64 __spare2[14]; /* Spare space for future expansion */
+ /* 0x100 */
+};
+
+/*
+ * Flags to be found in stx_mask
+ *
+ * Query request/result mask for statx() and struct statx::stx_mask.
+ *
+ * These bits should be set in the mask argument of statx() to request
+ * particular items when calling statx().
+ */
+#define STATX_TYPE 0x00000001U /* Want/got stx_mode & S_IFMT */
+#define STATX_MODE 0x00000002U /* Want/got stx_mode & ~S_IFMT */
+#define STATX_NLINK 0x00000004U /* Want/got stx_nlink */
+#define STATX_UID 0x00000008U /* Want/got stx_uid */
+#define STATX_GID 0x00000010U /* Want/got stx_gid */
+#define STATX_ATIME 0x00000020U /* Want/got stx_atime */
+#define STATX_MTIME 0x00000040U /* Want/got stx_mtime */
+#define STATX_CTIME 0x00000080U /* Want/got stx_ctime */
+#define STATX_INO 0x00000100U /* Want/got stx_ino */
+#define STATX_SIZE 0x00000200U /* Want/got stx_size */
+#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */
+#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
+#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
+#define STATX_ALL 0x00000fffU /* All currently supported flags */
+
+/*
+ * Attributes to be found in stx_attributes
+ *
+ * These give information about the features or the state of a file that might
+ * be of use to ordinary userspace programs such as GUIs or ls rather than
+ * specialised tools.
+ *
+ * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS
+ * semantically. Where possible, the numerical value is picked to correspond
+ * also.
+ */
+#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */
+#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */
+#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */
+#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */
+#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */
+
+#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
+
+
+#endif /* _UAPI_LINUX_STAT_H */
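
Since glibc does not wrap statx() yet, a small hypothetical sketch of calling
the new syscall directly (assumes kernel headers that carry struct statx in
<linux/stat.h>; the __NR_statx fallback is the x86_64 number added to the
syscall table further down in this series):

#include <stdio.h>
#include <fcntl.h>              /* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>         /* struct statx, STATX_* */

#ifndef __NR_statx
#define __NR_statx 332          /* x86_64 only, see syscall_64.tbl below */
#endif

int main(int argc, char *argv[])
{
        const char *path = argc > 1 ? argv[1] : ".";
        struct statx stx;

        /* 0 == AT_STATX_SYNC_AS_STAT, i.e. "do whatever stat() does" */
        if (syscall(__NR_statx, AT_FDCWD, path, 0,
                    STATX_BASIC_STATS | STATX_BTIME, &stx) < 0) {
                perror("statx");
                return 1;
        }

        printf("mask=%#x size=%llu\n", stx.stx_mask,
               (unsigned long long)stx.stx_size);
        if (stx.stx_mask & STATX_BTIME)
                printf("btime=%lld\n", (long long)stx.stx_btime.tv_sec);
        return 0;
}
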
diff --git a/tools/perf/Build b/tools/perf/Build
index 9b79f8d..bd8eeb6 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -50,5 +50,6 @@
libperf-y += arch/
libperf-y += ui/
libperf-y += scripts/
+libperf-y += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 28648c0..89018c7 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -73,9 +73,11 @@
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/bpf_common.h
+tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/hw_breakpoint.h
tools/include/uapi/linux/mman.h
tools/include/uapi/linux/perf_event.h
+tools/include/uapi/linux/stat.h
tools/include/linux/poison.h
tools/include/linux/rbtree.h
tools/include/linux/rbtree_augmented.h
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index a3c3e1c..4268f77 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -1,5 +1,10 @@
+#include <string.h>
+#include <regex.h>
+
#include "../../perf.h"
+#include "../../util/util.h"
#include "../../util/perf_regs.h"
+#include "../../util/debug.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(r0, PERF_REG_POWERPC_R0),
@@ -47,3 +52,109 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
SMPL_REG_END
};
+
+/* REG or %rREG */
+#define SDT_OP_REGEX1 "^(%r)?([1-2]?[0-9]|3[0-1])$"
+
+/* -NUM(REG) or NUM(REG) or -NUM(%rREG) or NUM(%rREG) */
+#define SDT_OP_REGEX2 "^(\\-)?([0-9]+)\\((%r)?([1-2]?[0-9]|3[0-1])\\)$"
+
+static regex_t sdt_op_regex1, sdt_op_regex2;
+
+static int sdt_init_op_regex(void)
+{
+ static int initialized;
+ int ret = 0;
+
+ if (initialized)
+ return 0;
+
+ ret = regcomp(&sdt_op_regex1, SDT_OP_REGEX1, REG_EXTENDED);
+ if (ret)
+ goto error;
+
+ ret = regcomp(&sdt_op_regex2, SDT_OP_REGEX2, REG_EXTENDED);
+ if (ret)
+ goto free_regex1;
+
+ initialized = 1;
+ return 0;
+
+free_regex1:
+ regfree(&sdt_op_regex1);
+error:
+ pr_debug4("Regex compilation error.\n");
+ return ret;
+}
+
+/*
+ * Parse OP and convert it into uprobe format, which is, +/-NUM(%gprREG).
+ * Possible variants of OP are:
+ * Format Example
+ * -------------------------
+ * NUM(REG) 48(18)
+ * -NUM(REG) -48(18)
+ * NUM(%rREG) 48(%r18)
+ * -NUM(%rREG) -48(%r18)
+ * REG 18
+ * %rREG %r18
+ * iNUM i0
+ * i-NUM i-1
+ *
+ * SDT marker arguments on PowerPC use the %rREG form with the -mregnames flag
+ * and the REG form with -mno-regnames. Here REG is a general purpose register,
+ * which is in the 0 to 31 range.
+ */
+int arch_sdt_arg_parse_op(char *old_op, char **new_op)
+{
+ int ret, new_len;
+ regmatch_t rm[5];
+ char prefix;
+
+ /* Constant argument. Uprobe does not support it */
+ if (old_op[0] == 'i') {
+ pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
+ return SDT_ARG_SKIP;
+ }
+
+ ret = sdt_init_op_regex();
+ if (ret < 0)
+ return ret;
+
+ if (!regexec(&sdt_op_regex1, old_op, 3, rm, 0)) {
+ /* REG or %rREG --> %gprREG */
+
+ new_len = 5; /* % g p r NULL */
+ new_len += (int)(rm[2].rm_eo - rm[2].rm_so);
+
+ *new_op = zalloc(new_len);
+ if (!*new_op)
+ return -ENOMEM;
+
+ scnprintf(*new_op, new_len, "%%gpr%.*s",
+ (int)(rm[2].rm_eo - rm[2].rm_so), old_op + rm[2].rm_so);
+ } else if (!regexec(&sdt_op_regex2, old_op, 5, rm, 0)) {
+ /*
+ * -NUM(REG) or NUM(REG) or -NUM(%rREG) or NUM(%rREG) -->
+ * +/-NUM(%gprREG)
+ */
+ prefix = (rm[1].rm_so == -1) ? '+' : '-';
+
+ new_len = 8; /* +/- ( % g p r ) NULL */
+ new_len += (int)(rm[2].rm_eo - rm[2].rm_so);
+ new_len += (int)(rm[4].rm_eo - rm[4].rm_so);
+
+ *new_op = zalloc(new_len);
+ if (!*new_op)
+ return -ENOMEM;
+
+ scnprintf(*new_op, new_len, "%c%.*s(%%gpr%.*s)", prefix,
+ (int)(rm[2].rm_eo - rm[2].rm_so), old_op + rm[2].rm_so,
+ (int)(rm[4].rm_eo - rm[4].rm_so), old_op + rm[4].rm_so);
+ } else {
+ pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
+ return SDT_ARG_SKIP;
+ }
+
+ return SDT_ARG_VALID;
+}
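
A hypothetical standalone program (not part of the patch) reusing the two
patterns above; handy for eyeballing which operands are accepted and how
they get rewritten into uprobe form:

#include <regex.h>
#include <stdio.h>

/* Same patterns as arch_sdt_arg_parse_op() above. */
#define OP_REGEX1 "^(%r)?([1-2]?[0-9]|3[0-1])$"
#define OP_REGEX2 "^(\\-)?([0-9]+)\\((%r)?([1-2]?[0-9]|3[0-1])\\)$"

static void convert(const char *op)
{
        regex_t re1, re2;
        regmatch_t rm[5];
        char out[64];

        regcomp(&re1, OP_REGEX1, REG_EXTENDED);
        regcomp(&re2, OP_REGEX2, REG_EXTENDED);

        if (!regexec(&re1, op, 3, rm, 0)) {
                /* REG or %rREG -> %gprREG */
                snprintf(out, sizeof(out), "%%gpr%.*s",
                         (int)(rm[2].rm_eo - rm[2].rm_so), op + rm[2].rm_so);
        } else if (!regexec(&re2, op, 5, rm, 0)) {
                /* [-]NUM((%r)REG) -> +/-NUM(%gprREG) */
                snprintf(out, sizeof(out), "%c%.*s(%%gpr%.*s)",
                         rm[1].rm_so == -1 ? '+' : '-',
                         (int)(rm[2].rm_eo - rm[2].rm_so), op + rm[2].rm_so,
                         (int)(rm[4].rm_eo - rm[4].rm_so), op + rm[4].rm_so);
        } else {
                snprintf(out, sizeof(out), "<skipped>");
        }

        printf("%-12s -> %s\n", op, out);
        regfree(&re1);
        regfree(&re2);
}

int main(void)
{
        convert("%r18");        /* -> %gpr18         */
        convert("18");          /* -> %gpr18         */
        convert("-48(%r18)");   /* -> -48(%gpr18)    */
        convert("48(18)");      /* -> +48(%gpr18)    */
        convert("i0");          /* constant, skipped */
        return 0;
}
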
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index e93ef0b3..5aef183 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -338,6 +338,7 @@
329 common pkey_mprotect sys_pkey_mprotect
330 common pkey_alloc sys_pkey_alloc
331 common pkey_free sys_pkey_free
+332 common statx sys_statx
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index d8a8dcf..3bf3548 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -1,8 +1,10 @@
#include <string.h>
+#include <regex.h>
#include "../../perf.h"
#include "../../util/util.h"
#include "../../util/perf_regs.h"
+#include "../../util/debug.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(AX, PERF_REG_X86_AX),
@@ -37,15 +39,23 @@ struct sdt_name_reg {
#define SDT_NAME_REG(n, m) {.sdt_name = "%" #n, .uprobe_name = "%" #m}
#define SDT_NAME_REG_END {.sdt_name = NULL, .uprobe_name = NULL}
-static const struct sdt_name_reg sdt_reg_renamings[] = {
+static const struct sdt_name_reg sdt_reg_tbl[] = {
SDT_NAME_REG(eax, ax),
SDT_NAME_REG(rax, ax),
+ SDT_NAME_REG(al, ax),
+ SDT_NAME_REG(ah, ax),
SDT_NAME_REG(ebx, bx),
SDT_NAME_REG(rbx, bx),
+ SDT_NAME_REG(bl, bx),
+ SDT_NAME_REG(bh, bx),
SDT_NAME_REG(ecx, cx),
SDT_NAME_REG(rcx, cx),
+ SDT_NAME_REG(cl, cx),
+ SDT_NAME_REG(ch, cx),
SDT_NAME_REG(edx, dx),
SDT_NAME_REG(rdx, dx),
+ SDT_NAME_REG(dl, dx),
+ SDT_NAME_REG(dh, dx),
SDT_NAME_REG(esi, si),
SDT_NAME_REG(rsi, si),
SDT_NAME_REG(sil, si),
@@ -87,45 +97,158 @@ static const struct sdt_name_reg sdt_reg_renamings[] = {
SDT_NAME_REG_END,
};
-int sdt_rename_register(char **pdesc, char *old_name)
+/*
+ * Perf only supports OP which is in +/-NUM(REG) form.
+ * Here the plus/minus sign, NUM and parentheses are optional;
+ * only REG is mandatory.
+ *
+ * SDT events also support indirect addressing mode with a
+ * symbol as offset, scaled mode and constants in OP. But
+ * perf does not support them yet. Below are a few examples.
+ *
+ * OP with scaled mode:
+ * (%rax,%rsi,8)
+ * 10(%rax,%rsi,8)
+ *
+ * OP with indirect addressing mode:
+ * check_action(%rip)
+ * mp_+52(%rip)
+ * 44+mp_(%rip)
+ *
+ * OP with constant values:
+ * $0
+ * $123
+ * $-1
+ */
+#define SDT_OP_REGEX "^([+\\-]?)([0-9]*)(\\(?)(%[a-z][a-z0-9]+)(\\)?)$"
+
+static regex_t sdt_op_regex;
+
+static int sdt_init_op_regex(void)
{
- const struct sdt_name_reg *rnames = sdt_reg_renamings;
- char *new_desc, *old_desc = *pdesc;
- size_t prefix_len, sdt_len, uprobe_len, old_desc_len, offset;
- int ret = -1;
+ static int initialized;
+ int ret = 0;
- while (ret != 0 && rnames->sdt_name != NULL) {
- sdt_len = strlen(rnames->sdt_name);
- ret = strncmp(old_name, rnames->sdt_name, sdt_len);
- rnames += !!ret;
- }
-
- if (rnames->sdt_name == NULL)
+ if (initialized)
return 0;
- sdt_len = strlen(rnames->sdt_name);
- uprobe_len = strlen(rnames->uprobe_name);
- old_desc_len = strlen(old_desc) + 1;
+ ret = regcomp(&sdt_op_regex, SDT_OP_REGEX, REG_EXTENDED);
+ if (ret < 0) {
+ pr_debug4("Regex compilation error.\n");
+ return ret;
+ }
- new_desc = zalloc(old_desc_len + uprobe_len - sdt_len);
- if (new_desc == NULL)
- return -1;
-
- /* Copy the chars before the register name (at least '%') */
- prefix_len = old_name - old_desc;
- memcpy(new_desc, old_desc, prefix_len);
-
- /* Copy the new register name */
- memcpy(new_desc + prefix_len, rnames->uprobe_name, uprobe_len);
-
- /* Copy the chars after the register name (if need be) */
- offset = prefix_len + sdt_len;
- if (offset < old_desc_len)
- memcpy(new_desc + prefix_len + uprobe_len,
- old_desc + offset, old_desc_len - offset);
-
- free(old_desc);
- *pdesc = new_desc;
-
+ initialized = 1;
return 0;
}
+
+/*
+ * Max x86 register name length is 5 (e.g. %r15d), so the 6th char
+ * should always hold the terminating NULL. This helps to find the
+ * register name length using strlen, instead of maintaining one more variable.
+ */
+#define SDT_REG_NAME_SIZE 6
+
+/*
+ * The uprobe parser does not support all gas register names;
+ * so, we have to replace them (ex. for x86_64: %rax -> %ax).
+ * Note: If a register does not require renaming, just copy
+ * it as it is, but don't leave it empty.
+ */
+static void sdt_rename_register(char *sdt_reg, int sdt_len, char *uprobe_reg)
+{
+ int i = 0;
+
+ for (i = 0; sdt_reg_tbl[i].sdt_name != NULL; i++) {
+ if (!strncmp(sdt_reg_tbl[i].sdt_name, sdt_reg, sdt_len)) {
+ strcpy(uprobe_reg, sdt_reg_tbl[i].uprobe_name);
+ return;
+ }
+ }
+
+ strncpy(uprobe_reg, sdt_reg, sdt_len);
+}
+
+int arch_sdt_arg_parse_op(char *old_op, char **new_op)
+{
+ char new_reg[SDT_REG_NAME_SIZE] = {0};
+ int new_len = 0, ret;
+ /*
+ * rm[0]: +/-NUM(REG)
+ * rm[1]: +/-
+ * rm[2]: NUM
+ * rm[3]: (
+ * rm[4]: REG
+ * rm[5]: )
+ */
+ regmatch_t rm[6];
+ /*
+ * Max prefix length is 2 as it may contain a sign (+/-)
+ * and a displacement 0 (both sign and displacement 0 are
+ * optional so it may be empty). Use one more character
+ * to hold the trailing NULL so that strlen can be used to find
+ * the prefix length, instead of maintaining one more variable.
+ */
+ char prefix[3] = {0};
+
+ ret = sdt_init_op_regex();
+ if (ret < 0)
+ return ret;
+
+ /*
+ * If the OP is unsupported, does not match the regex, or the
+ * register name is too long, skip it.
+ */
+ if (strchr(old_op, ',') || strchr(old_op, '$') ||
+ regexec(&sdt_op_regex, old_op, 6, rm, 0) ||
+ rm[4].rm_eo - rm[4].rm_so > SDT_REG_NAME_SIZE) {
+ pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
+ return SDT_ARG_SKIP;
+ }
+
+ /*
+ * Prepare prefix.
+ * If the SDT OP has parentheses but does not provide a
+ * displacement, add 0 for the displacement.
+ * SDT Uprobe Prefix
+ * -----------------------------
+ * +24(%rdi) +24(%di) +
+ * 24(%rdi) +24(%di) +
+ * %rdi %di
+ * (%rdi) +0(%di) +0
+ * -80(%rbx) -80(%bx) -
+ */
+ if (rm[3].rm_so != rm[3].rm_eo) {
+ if (rm[1].rm_so != rm[1].rm_eo)
+ prefix[0] = *(old_op + rm[1].rm_so);
+ else if (rm[2].rm_so != rm[2].rm_eo)
+ prefix[0] = '+';
+ else
+ strncpy(prefix, "+0", 2);
+ }
+
+ /* Rename register */
+ sdt_rename_register(old_op + rm[4].rm_so, rm[4].rm_eo - rm[4].rm_so,
+ new_reg);
+
+ /* Prepare final OP which should be valid for uprobe_events */
+ new_len = strlen(prefix) +
+ (rm[2].rm_eo - rm[2].rm_so) +
+ (rm[3].rm_eo - rm[3].rm_so) +
+ strlen(new_reg) +
+ (rm[5].rm_eo - rm[5].rm_so) +
+ 1; /* NULL */
+
+ *new_op = zalloc(new_len);
+ if (!*new_op)
+ return -ENOMEM;
+
+ scnprintf(*new_op, new_len, "%.*s%.*s%.*s%.*s%.*s",
+ strlen(prefix), prefix,
+ (int)(rm[2].rm_eo - rm[2].rm_so), old_op + rm[2].rm_so,
+ (int)(rm[3].rm_eo - rm[3].rm_so), old_op + rm[3].rm_so,
+ strlen(new_reg), new_reg,
+ (int)(rm[5].rm_eo - rm[5].rm_so), old_op + rm[5].rm_so);
+
+ return SDT_ARG_VALID;
+}
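
The prefix rules from the table above boil down to three cases; a hypothetical
sketch of just that step, with the table rows as assertions (has_paren,
has_sign and has_num mirror rm[3], rm[1] and rm[2] matching a non-empty
subexpression):

#include <assert.h>
#include <string.h>

static const char *sdt_prefix(int has_paren, int has_sign, int has_num,
                              char sign)
{
        static char buf[3];

        buf[0] = '\0';
        if (!has_paren)
                return buf;             /* plain register, no prefix */
        if (has_sign) {
                buf[0] = sign;
                buf[1] = '\0';
        } else if (has_num) {
                strcpy(buf, "+");       /* displacement without a sign */
        } else {
                strcpy(buf, "+0");      /* parentheses only, e.g. (%rdi) */
        }
        return buf;
}

int main(void)
{
        assert(!strcmp(sdt_prefix(1, 1, 1, '+'), "+"));   /* +24(%rdi) -> +24(%di) */
        assert(!strcmp(sdt_prefix(1, 0, 1,  0 ), "+"));   /*  24(%rdi) -> +24(%di) */
        assert(!strcmp(sdt_prefix(0, 0, 0,  0 ), ""));    /*  %rdi     ->  %di     */
        assert(!strcmp(sdt_prefix(1, 0, 0,  0 ), "+0"));  /*  (%rdi)   -> +0(%di)  */
        assert(!strcmp(sdt_prefix(1, 1, 1, '-'), "-"));   /* -80(%rbx) -> -80(%bx) */
        return 0;
}
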
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 7ae2389..1eec96a 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -301,12 +301,6 @@ void list_common_cmds_help(void)
}
}
-static int is_perf_command(const char *s)
-{
- return is_in_cmdlist(&main_cmds, s) ||
- is_in_cmdlist(&other_cmds, s);
-}
-
static const char *cmd_to_page(const char *perf_cmd)
{
char *s;
@@ -446,7 +440,6 @@ int cmd_help(int argc, const char **argv)
"perf help [--all] [--man|--web|--info] [command]",
NULL
};
- const char *alias;
int rc;
load_command_list("perf-", &main_cmds, &other_cmds);
@@ -472,12 +465,6 @@ int cmd_help(int argc, const char **argv)
return 0;
}
- alias = alias_lookup(argv[0]);
- if (alias && !is_perf_command(argv[0])) {
- printf("`perf %s' is aliased to `%s'\n", argv[0], alias);
- return 0;
- }
-
switch (help_format) {
case HELP_FORMAT_MAN:
rc = show_man_page(argv[0]);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index c88f9f2..fce278d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -31,6 +31,7 @@
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
+#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
@@ -267,15 +268,6 @@ static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void
({ struct syscall_tp *fields = evsel->priv; \
fields->name.pointer(&fields->name, sample); })
-struct syscall_arg {
- unsigned long val;
- struct thread *thread;
- struct trace *trace;
- void *parm;
- u8 idx;
- u8 mask;
-};
-
struct strarray {
int offset;
int nr_entries;
@@ -771,6 +763,10 @@ static struct syscall_fmt {
.arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
{ .name = "stat", .errmsg = true, .alias = "newstat", },
{ .name = "statfs", .errmsg = true, },
+ { .name = "statx", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+ [2] = SCA_STATX_FLAGS, /* flags */
+ [3] = SCA_STATX_MASK, /* mask */ }, },
{ .name = "swapoff", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
{ .name = "swapon", .errmsg = true,
@@ -821,12 +817,21 @@ struct syscall {
void **arg_parm;
};
-static size_t fprintf_duration(unsigned long t, FILE *fp)
+/*
+ * We need this 'calculated' boolean because in some cases we really
+ * don't know the duration of a syscall, for instance, when we start
+ * a session and some threads are waiting for a syscall to finish, say 'poll',
+ * in which case all we can do is print "( ? )" for the duration and for the
+ * start timestamp.
+ */
+static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
double duration = (double)t / NSEC_PER_MSEC;
size_t printed = fprintf(fp, "(");
- if (duration >= 1.0)
+ if (!calculated)
+ printed += fprintf(fp, " ? ");
+ else if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
@@ -1028,13 +1033,27 @@ static bool trace__filter_duration(struct trace *trace, double t)
return t < (trace->duration_filter * NSEC_PER_MSEC);
}
-static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
+static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
return fprintf(fp, "%10.3f ", ts);
}
+/*
+ * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
+ * using ttrace->entry_time for a thread that receives a sys_exit without
+ * first having received a sys_enter ("poll" issued before tracing session
+ * starts, lost sys_enter exit due to ring buffer overflow).
+ */
+static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
+{
+ if (tstamp > 0)
+ return __trace__fprintf_tstamp(trace, tstamp, fp);
+
+ return fprintf(fp, " ? ");
+}
+
static bool done = false;
static bool interrupted = false;
@@ -1045,10 +1064,10 @@ static void sig_handler(int sig)
}
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
- u64 duration, u64 tstamp, FILE *fp)
+ u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
- printed += fprintf_duration(duration, fp);
+ printed += fprintf_duration(duration, duration_calculated, fp);
if (trace->multiple_threads) {
if (trace->show_comm)
@@ -1450,7 +1469,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
duration = sample->time - ttrace->entry_time;
- printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
+ printed = trace__fprintf_entry_head(trace, trace->current, duration, true, ttrace->entry_time, trace->output);
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
@@ -1497,7 +1516,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
- trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
+ trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
}
} else {
@@ -1545,6 +1564,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
{
long ret;
u64 duration = 0;
+ bool duration_calculated = false;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
@@ -1573,6 +1593,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
duration = sample->time - ttrace->entry_time;
if (trace__filter_duration(trace, duration))
goto out;
+ duration_calculated = true;
} else if (trace->duration_filter)
goto out;
@@ -1588,7 +1609,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (trace->summary_only)
goto out;
- trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
+ trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
if (ttrace->entry_pending) {
fprintf(trace->output, "%-70s", ttrace->entry_str);
@@ -1855,7 +1876,7 @@ static int trace__pgfault(struct trace *trace,
thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
sample->ip, &al);
- trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
+ trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
fprintf(trace->output, "%sfault [",
evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index c747bfd..83fe220 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -1,7 +1,9 @@
#!/bin/sh
HEADERS='
+include/uapi/linux/fcntl.h
include/uapi/linux/perf_event.h
+include/uapi/linux/stat.h
include/linux/hash.h
include/uapi/linux/hw_breakpoint.h
arch/x86/include/asm/disabled-features.h
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 4b283d1..9217f22 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -267,71 +267,6 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
return handled;
}
-static int handle_alias(int *argcp, const char ***argv)
-{
- int envchanged = 0, ret = 0, saved_errno = errno;
- int count, option_count;
- const char **new_argv;
- const char *alias_command;
- char *alias_string;
-
- alias_command = (*argv)[0];
- alias_string = alias_lookup(alias_command);
- if (alias_string) {
- if (alias_string[0] == '!') {
- if (*argcp > 1) {
- struct strbuf buf;
-
- if (strbuf_init(&buf, PATH_MAX) < 0 ||
- strbuf_addstr(&buf, alias_string) < 0 ||
- sq_quote_argv(&buf, (*argv) + 1,
- PATH_MAX) < 0)
- die("Failed to allocate memory.");
- free(alias_string);
- alias_string = buf.buf;
- }
- ret = system(alias_string + 1);
- if (ret >= 0 && WIFEXITED(ret) &&
- WEXITSTATUS(ret) != 127)
- exit(WEXITSTATUS(ret));
- die("Failed to run '%s' when expanding alias '%s'",
- alias_string + 1, alias_command);
- }
- count = split_cmdline(alias_string, &new_argv);
- if (count < 0)
- die("Bad alias.%s string", alias_command);
- option_count = handle_options(&new_argv, &count, &envchanged);
- if (envchanged)
- die("alias '%s' changes environment variables\n"
- "You can use '!perf' in the alias to do this.",
- alias_command);
- memmove(new_argv - option_count, new_argv,
- count * sizeof(char *));
- new_argv -= option_count;
-
- if (count < 1)
- die("empty alias for %s", alias_command);
-
- if (!strcmp(alias_command, new_argv[0]))
- die("recursive alias: %s", alias_command);
-
- new_argv = realloc(new_argv, sizeof(char *) *
- (count + *argcp + 1));
- /* insert after command name */
- memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
- new_argv[count + *argcp] = NULL;
-
- *argv = new_argv;
- *argcp += count - 1;
-
- ret = 1;
- }
-
- errno = saved_errno;
-
- return ret;
-}
-
#define RUN_SETUP (1<<0)
#define USE_PAGER (1<<1)
@@ -455,25 +390,12 @@ static void execv_dashed_external(const char **argv)
static int run_argv(int *argcp, const char ***argv)
{
- int done_alias = 0;
+ /* See if it's an internal command */
+ handle_internal_command(*argcp, *argv);
- while (1) {
- /* See if it's an internal command */
- handle_internal_command(*argcp, *argv);
-
- /* .. then try the external ones */
- execv_dashed_external(*argv);
-
- /* It could be an alias -- this works around the insanity
- * of overriding "perf log" with "perf show" by having
- * alias.log = show
- */
- if (done_alias || !handle_alias(argcp, argv))
- break;
- done_alias = 1;
- }
-
- return done_alias;
+ /* .. then try the external ones */
+ execv_dashed_external(*argv);
+ return 0;
}
static void pthread__block_sigwinch(void)
@@ -606,17 +528,12 @@ int main(int argc, const char **argv)
while (1) {
static int done_help;
- int was_alias = run_argv(&argc, &argv);
+
+ run_argv(&argc, &argv);
if (errno != ENOENT)
break;
- if (was_alias) {
- fprintf(stderr, "Expansion of alias '%s' failed; "
- "'%s' is not a perf-command\n",
- cmd, argv[0]);
- goto out;
- }
if (!done_help) {
cmd = argv[0] = help_unknown_cmd(cmd);
done_help = 1;
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
new file mode 100644
index 0000000..be95ac6
--- /dev/null
+++ b/tools/perf/trace/beauty/Build
@@ -0,0 +1 @@
+libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
new file mode 100644
index 0000000..cf50be3
--- /dev/null
+++ b/tools/perf/trace/beauty/beauty.h
@@ -0,0 +1,24 @@
+#ifndef _PERF_TRACE_BEAUTY_H
+#define _PERF_TRACE_BEAUTY_H
+
+#include <linux/types.h>
+
+struct trace;
+struct thread;
+
+struct syscall_arg {
+ unsigned long val;
+ struct thread *thread;
+ struct trace *trace;
+ void *parm;
+ u8 idx;
+ u8 mask;
+};
+
+size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
+
+size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_STATX_MASK syscall_arg__scnprintf_statx_mask
+
+#endif /* _PERF_TRACE_BEAUTY_H */
diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c
new file mode 100644
index 0000000..5643b69
--- /dev/null
+++ b/tools/perf/trace/beauty/statx.c
@@ -0,0 +1,72 @@
+/*
+ * trace/beauty/statx.c
+ *
+ * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+#include <sys/types.h>
+#include <uapi/linux/fcntl.h>
+#include <uapi/linux/stat.h>
+
+size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+ if (flags == 0)
+ return scnprintf(bf, size, "SYNC_AS_STAT");
+#define P_FLAG(n) \
+ if (flags & AT_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~AT_##n; \
+ }
+
+ P_FLAG(SYMLINK_NOFOLLOW);
+ P_FLAG(REMOVEDIR);
+ P_FLAG(SYMLINK_FOLLOW);
+ P_FLAG(NO_AUTOMOUNT);
+ P_FLAG(EMPTY_PATH);
+ P_FLAG(STATX_FORCE_SYNC);
+ P_FLAG(STATX_DONT_SYNC);
+
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int printed = 0, flags = arg->val;
+
+#define P_FLAG(n) \
+ if (flags & STATX_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~STATX_##n; \
+ }
+
+ P_FLAG(TYPE);
+ P_FLAG(MODE);
+ P_FLAG(NLINK);
+ P_FLAG(UID);
+ P_FLAG(GID);
+ P_FLAG(ATIME);
+ P_FLAG(MTIME);
+ P_FLAG(CTIME);
+ P_FLAG(INO);
+ P_FLAG(SIZE);
+ P_FLAG(BLOCKS);
+ P_FLAG(BTIME);
+
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
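
The P_FLAG idiom used by both printers above is self-contained; a hypothetical
standalone variant (plain snprintf instead of perf's scnprintf, and only a
subset of the STATX_* bits) builds the same kind of "A|B|C" string:

#include <stdio.h>
#include <linux/stat.h>         /* STATX_* */

static int statx_mask__scnprintf(char *bf, size_t size, unsigned int flags)
{
        int printed = 0;

#define P_FLAG(n) \
        if (flags & STATX_##n) { \
                printed += snprintf(bf + printed, size - printed, "%s%s", \
                                    printed ? "|" : "", #n); \
                flags &= ~STATX_##n; \
        }

        P_FLAG(TYPE);
        P_FLAG(MODE);
        P_FLAG(SIZE);
        P_FLAG(BTIME);
#undef P_FLAG

        if (flags)      /* anything not decoded above, as hex */
                printed += snprintf(bf + printed, size - printed, "%s%#x",
                                    printed ? "|" : "", flags);
        return printed;
}

int main(void)
{
        char buf[128];

        statx_mask__scnprintf(buf, sizeof(buf), STATX_TYPE | STATX_MODE | STATX_BTIME);
        printf("%s\n", buf);    /* prints "TYPE|MODE|BTIME" */
        return 0;
}
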
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 2ae92da6..5c0ea11 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,4 +1,3 @@
-libperf-y += alias.o
libperf-y += annotate.o
libperf-y += block-range.o
libperf-y += build-id.o
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
deleted file mode 100644
index 6455471..0000000
--- a/tools/perf/util/alias.c
+++ /dev/null
@@ -1,78 +0,0 @@
-#include "cache.h"
-#include "util.h"
-#include "config.h"
-
-static const char *alias_key;
-static char *alias_val;
-
-static int alias_lookup_cb(const char *k, const char *v,
- void *cb __maybe_unused)
-{
- if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
- if (!v)
- return config_error_nonbool(k);
- alias_val = strdup(v);
- return 0;
- }
- return 0;
-}
-
-char *alias_lookup(const char *alias)
-{
- alias_key = alias;
- alias_val = NULL;
- perf_config(alias_lookup_cb, NULL);
- return alias_val;
-}
-
-int split_cmdline(char *cmdline, const char ***argv)
-{
- int src, dst, count = 0, size = 16;
- char quoted = 0;
-
- *argv = malloc(sizeof(char*) * size);
-
- /* split alias_string */
- (*argv)[count++] = cmdline;
- for (src = dst = 0; cmdline[src];) {
- char c = cmdline[src];
- if (!quoted && isspace(c)) {
- cmdline[dst++] = 0;
- while (cmdline[++src]
- && isspace(cmdline[src]))
- ; /* skip */
- if (count >= size) {
- size += 16;
- *argv = realloc(*argv, sizeof(char*) * size);
- }
- (*argv)[count++] = cmdline + dst;
- } else if (!quoted && (c == '\'' || c == '"')) {
- quoted = c;
- src++;
- } else if (c == quoted) {
- quoted = 0;
- src++;
- } else {
- if (c == '\\' && quoted != '\'') {
- src++;
- c = cmdline[src];
- if (!c) {
- zfree(argv);
- return error("cmdline ends with \\");
- }
- }
- cmdline[dst++] = c;
- src++;
- }
- }
-
- cmdline[dst] = 0;
-
- if (quoted) {
- zfree(argv);
- return error("unclosed quote");
- }
-
- return count;
-}
-
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 512c0c8..0328f29 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -15,7 +15,6 @@
#define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
#define PERF_PAGER_ENVIRONMENT "PERF_PAGER"
-char *alias_lookup(const char *alias);
int split_cmdline(char *cmdline, const char ***argv);
#define alloc_nr(x) (((x)+16)*3/2)
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index d78776a..3cea1fb 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1105,63 +1105,100 @@ int callchain_branch_counts(struct callchain_root *root,
cycles_count);
}
+static int counts_str_build(char *bf, int bfsize,
+ u64 branch_count, u64 predicted_count,
+ u64 abort_count, u64 cycles_count,
+ u64 iter_count, u64 samples_count)
+{
+ double predicted_percent = 0.0;
+ const char *null_str = "";
+ char iter_str[32];
+ char cycle_str[32];
+ char *istr, *cstr;
+ u64 cycles;
+
+ if (branch_count == 0)
+ return scnprintf(bf, bfsize, " (calltrace)");
+
+ cycles = cycles_count / branch_count;
+
+ if (iter_count && samples_count) {
+ if (cycles > 0)
+ scnprintf(iter_str, sizeof(iter_str),
+ " iterations:%" PRId64 "",
+ iter_count / samples_count);
+ else
+ scnprintf(iter_str, sizeof(iter_str),
+ "iterations:%" PRId64 "",
+ iter_count / samples_count);
+ istr = iter_str;
+ } else
+ istr = (char *)null_str;
+
+ if (cycles > 0) {
+ scnprintf(cycle_str, sizeof(cycle_str),
+ "cycles:%" PRId64 "", cycles);
+ cstr = cycle_str;
+ } else
+ cstr = (char *)null_str;
+
+ predicted_percent = predicted_count * 100.0 / branch_count;
+
+ if ((predicted_count == branch_count) && (abort_count == 0)) {
+ if ((cycles > 0) || (istr != (char *)null_str))
+ return scnprintf(bf, bfsize, " (%s%s)", cstr, istr);
+ else
+ return scnprintf(bf, bfsize, "%s", (char *)null_str);
+ }
+
+ if ((predicted_count < branch_count) && (abort_count == 0)) {
+ if ((cycles > 0) || (istr != (char *)null_str))
+ return scnprintf(bf, bfsize,
+ " (predicted:%.1f%% %s%s)",
+ predicted_percent, cstr, istr);
+ else {
+ return scnprintf(bf, bfsize,
+ " (predicted:%.1f%%)",
+ predicted_percent);
+ }
+ }
+
+ if ((predicted_count == branch_count) && (abort_count > 0)) {
+ if ((cycles > 0) || (istr != (char *)null_str))
+ return scnprintf(bf, bfsize,
+ " (abort:%" PRId64 " %s%s)",
+ abort_count, cstr, istr);
+ else
+ return scnprintf(bf, bfsize,
+ " (abort:%" PRId64 ")",
+ abort_count);
+ }
+
+ if ((cycles > 0) || (istr != (char *)null_str))
+ return scnprintf(bf, bfsize,
+ " (predicted:%.1f%% abort:%" PRId64 " %s%s)",
+ predicted_percent, abort_count, cstr, istr);
+
+ return scnprintf(bf, bfsize,
+ " (predicted:%.1f%% abort:%" PRId64 ")",
+ predicted_percent, abort_count);
+}
+
static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 samples_count)
{
- double predicted_percent = 0.0;
- const char *null_str = "";
- char iter_str[32];
- char *str;
- u64 cycles = 0;
+ char str[128];
- if (branch_count == 0) {
- if (fp)
- return fprintf(fp, " (calltrace)");
-
- return scnprintf(bf, bfsize, " (calltrace)");
- }
-
- if (iter_count && samples_count) {
- scnprintf(iter_str, sizeof(iter_str),
- ", iterations:%" PRId64 "",
- iter_count / samples_count);
- str = iter_str;
- } else
- str = (char *)null_str;
-
- predicted_percent = predicted_count * 100.0 / branch_count;
- cycles = cycles_count / branch_count;
-
- if ((predicted_percent >= 100.0) && (abort_count == 0)) {
- if (fp)
- return fprintf(fp, " (cycles:%" PRId64 "%s)",
- cycles, str);
-
- return scnprintf(bf, bfsize, " (cycles:%" PRId64 "%s)",
- cycles, str);
- }
-
- if ((predicted_percent < 100.0) && (abort_count == 0)) {
- if (fp)
- return fprintf(fp,
- " (predicted:%.1f%%, cycles:%" PRId64 "%s)",
- predicted_percent, cycles, str);
-
- return scnprintf(bf, bfsize,
- " (predicted:%.1f%%, cycles:%" PRId64 "%s)",
- predicted_percent, cycles, str);
- }
+ counts_str_build(str, sizeof(str), branch_count,
+ predicted_count, abort_count, cycles_count,
+ iter_count, samples_count);
if (fp)
- return fprintf(fp,
- " (predicted:%.1f%%, abort:%" PRId64 ", cycles:%" PRId64 "%s)",
- predicted_percent, abort_count, cycles, str);
+ return fprintf(fp, "%s", str);
- return scnprintf(bf, bfsize,
- " (predicted:%.1f%%, abort:%" PRId64 ", cycles:%" PRId64 "%s)",
- predicted_percent, abort_count, cycles, str);
+ return scnprintf(bf, bfsize, "%s", str);
}
int callchain_list_counts__printf_value(struct callchain_node *node,
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0c7d5a4..7b01d59 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -627,6 +627,8 @@ static int perf_config_set__init(struct perf_config_set *set)
{
int ret = -1;
const char *home = NULL;
+ char *user_config;
+ struct stat st;
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
if (config_exclusive_filename)
@@ -637,35 +639,41 @@ static int perf_config_set__init(struct perf_config_set *set)
}
home = getenv("HOME");
- if (perf_config_global() && home) {
- char *user_config = strdup(mkpath("%s/.perfconfig", home));
- struct stat st;
- if (user_config == NULL) {
- warning("Not enough memory to process %s/.perfconfig, "
- "ignoring it.", home);
- goto out;
- }
+ /*
+ * Skip reading user config if:
+ * - there is no place to read it from (HOME)
+ * - we are asked not to (PERF_CONFIG_NOGLOBAL=1)
+ */
+ if (!home || !*home || !perf_config_global())
+ return 0;
- if (stat(user_config, &st) < 0) {
- if (errno == ENOENT)
- ret = 0;
- goto out_free;
- }
-
- ret = 0;
-
- if (st.st_uid && (st.st_uid != geteuid())) {
- warning("File %s not owned by current user or root, "
- "ignoring it.", user_config);
- goto out_free;
- }
-
- if (st.st_size)
- ret = perf_config_from_file(collect_config, user_config, set);
-out_free:
- free(user_config);
+ user_config = strdup(mkpath("%s/.perfconfig", home));
+ if (user_config == NULL) {
+ warning("Not enough memory to process %s/.perfconfig, "
+ "ignoring it.", home);
+ goto out;
}
+
+ if (stat(user_config, &st) < 0) {
+ if (errno == ENOENT)
+ ret = 0;
+ goto out_free;
+ }
+
+ ret = 0;
+
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ warning("File %s not owned by current user or root, "
+ "ignoring it.", user_config);
+ goto out_free;
+ }
+
+ if (st.st_size)
+ ret = perf_config_from_file(collect_config, user_config, set);
+
+out_free:
+ free(user_config);
out:
return ret;
}
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
index 2821f8d..3420144 100644
--- a/tools/perf/util/help-unknown-cmd.c
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -6,16 +6,12 @@
#include "levenshtein.h"
static int autocorrect;
-static struct cmdnames aliases;
static int perf_unknown_cmd_config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "help.autocorrect"))
autocorrect = perf_config_int(var,value);
- /* Also use aliases for command lookup */
- if (!prefixcmp(var, "alias."))
- add_cmdname(&aliases, var + 6, strlen(var + 6));
return 0;
}
@@ -59,14 +55,12 @@ const char *help_unknown_cmd(const char *cmd)
memset(&main_cmds, 0, sizeof(main_cmds));
memset(&other_cmds, 0, sizeof(main_cmds));
- memset(&aliases, 0, sizeof(aliases));
perf_config(perf_unknown_cmd_config, NULL);
load_command_list("perf-", &main_cmds, &other_cmds);
- if (add_cmd_list(&main_cmds, &aliases) < 0 ||
- add_cmd_list(&main_cmds, &other_cmds) < 0) {
+ if (add_cmd_list(&main_cmds, &other_cmds) < 0) {
fprintf(stderr, "ERROR: Failed to allocate command list for unknown command.\n");
goto end;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3c4d4d0..61bf304 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -2459,7 +2459,7 @@ int parse_filter_percentage(const struct option *opt __maybe_unused,
else if (!strcmp(arg, "absolute"))
symbol_conf.filter_relative = false;
else {
- pr_debug("Invalud percentage: %s\n", arg);
+ pr_debug("Invalid percentage: %s\n", arg);
return -1;
}
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index a37e593..b2ae039 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -6,10 +6,10 @@ const struct sample_reg __weak sample_reg_masks[] = {
SMPL_REG_END
};
-int __weak sdt_rename_register(char **pdesc __maybe_unused,
- char *old_name __maybe_unused)
+int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
+ char **new_op __maybe_unused)
{
- return 0;
+ return SDT_ARG_SKIP;
}
#ifdef HAVE_PERF_REGS_SUPPORT
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 7544a15..32b37d1 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -15,11 +15,12 @@ struct sample_reg {
extern const struct sample_reg sample_reg_masks[];
-/*
- * The table sdt_reg_renamings is used for adjusting gcc/gas-generated
- * registers before filling the uprobe tracer interface.
- */
-int sdt_rename_register(char **pdesc, char *old_name);
+enum {
+ SDT_ARG_VALID = 0,
+ SDT_ARG_SKIP,
+};
+
+int arch_sdt_arg_parse_op(char *old_op, char **new_op);
#ifdef HAVE_PERF_REGS_SUPPORT
#include <perf_regs.h>
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index d741634..88714de 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -694,10 +694,29 @@ static const char * const type_to_suffix[] = {
"", ":u8", ":u16", "", ":u32", "", "", "", ":u64"
};
+/*
+ * Isolate the number string and convert it into a decimal value;
+ * this will be an index used to get the suffix of the uprobe name
+ * (defining the type).
+ */
+static int sdt_arg_parse_size(char *n_ptr, const char **suffix)
+{
+ long type_idx;
+
+ type_idx = strtol(n_ptr, NULL, 10);
+ if (type_idx < -8 || type_idx > 8) {
+ pr_debug4("Failed to get a valid sdt type\n");
+ return -1;
+ }
+
+ *suffix = type_to_suffix[type_idx + 8];
+ return 0;
+}
+
static int synthesize_sdt_probe_arg(struct strbuf *buf, int i, const char *arg)
{
- char *tmp, *desc = strdup(arg);
- const char *prefix = "", *suffix = "";
+ char *op, *desc = strdup(arg), *new_op = NULL;
+ const char *suffix = "";
int ret = -1;
if (desc == NULL) {
@@ -705,112 +724,37 @@ static int synthesize_sdt_probe_arg(struct strbuf *buf, int i, const char *arg)
return ret;
}
- tmp = strchr(desc, '@');
- if (tmp) {
- long type_idx;
- /*
- * Isolate the string number and convert it into a
- * binary value; this will be an index to get suffix
- * of the uprobe name (defining the type)
- */
- tmp[0] = '\0';
- type_idx = strtol(desc, NULL, 10);
- /* Check that the conversion went OK */
- if (type_idx == LONG_MIN || type_idx == LONG_MAX) {
- pr_debug4("Failed to parse sdt type\n");
+ /*
+ * Argument is in N@OP format. N is the size of the argument and OP is
+ * the actual assembly operand. N can be omitted; in that case the
+ * argument is just OP (without the @).
+ */
+ op = strchr(desc, '@');
+ if (op) {
+ op[0] = '\0';
+ op++;
+
+ if (sdt_arg_parse_size(desc, &suffix))
goto error;
- }
- /* Check that the converted value is OK */
- if (type_idx < -8 || type_idx > 8) {
- pr_debug4("Failed to get a valid sdt type\n");
- goto error;
- }
- suffix = type_to_suffix[type_idx + 8];
- /* Get rid of the sdt prefix which is now useless */
- tmp++;
- memmove(desc, tmp, strlen(tmp) + 1);
+ } else {
+ op = desc;
}
- /*
- * The uprobe tracer format does not support all the
- * addressing modes (notably: in x86 the scaled mode); so, we
- * detect ',' characters, if there is just one, there is no
- * use converting the sdt arg into a uprobe one.
- */
- if (strchr(desc, ',')) {
- pr_debug4("Skipping unsupported SDT argument; %s\n", desc);
- goto out;
- }
+ ret = arch_sdt_arg_parse_op(op, &new_op);
- /*
- * If the argument addressing mode is indirect, we must check
- * a few things...
- */
- tmp = strchr(desc, '(');
- if (tmp) {
- int j;
-
- /*
- * ...if the addressing mode is indirect with a
- * positive offset (ex.: "1608(%ax)"), we need to add
- * a '+' prefix so as to be compliant with uprobe
- * format.
- */
- if (desc[0] != '+' && desc[0] != '-')
- prefix = "+";
-
- /*
- * ...or if the addressing mode is indirect with a symbol
- * as offset, the argument will not be supported by
- * the uprobe tracer format; so, let's skip this one.
- */
- for (j = 0; j < tmp - desc; j++) {
- if (desc[j] != '+' && desc[j] != '-' &&
- !isdigit(desc[j])) {
- pr_debug4("Skipping unsupported SDT argument; "
- "%s\n", desc);
- goto out;
- }
- }
- }
-
- /*
- * The uprobe tracer format does not support constants; if we
- * find one in the current argument, let's skip the argument.
- */
- if (strchr(desc, '$')) {
- pr_debug4("Skipping unsupported SDT argument; %s\n", desc);
- goto out;
- }
-
- /*
- * The uprobe parser does not support all gas register names;
- * so, we have to replace them (ex. for x86_64: %rax -> %ax);
- * the loop below looks for the register names (starting with
- * a '%' and tries to perform the needed renamings.
- */
- tmp = strchr(desc, '%');
- while (tmp) {
- size_t offset = tmp - desc;
-
- ret = sdt_rename_register(&desc, desc + offset);
- if (ret < 0)
- goto error;
-
- /*
- * The desc pointer might have changed; so, let's not
- * try to reuse tmp for next lookup
- */
- tmp = strchr(desc + offset + 1, '%');
- }
-
- if (strbuf_addf(buf, " arg%d=%s%s%s", i + 1, prefix, desc, suffix) < 0)
+ if (ret < 0)
goto error;
-out:
+ if (ret == SDT_ARG_VALID) {
+ ret = strbuf_addf(buf, " arg%d=%s%s", i + 1, new_op, suffix);
+ if (ret < 0)
+ goto error;
+ }
+
ret = 0;
error:
free(desc);
+ free(new_op);
return ret;
}
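
A hypothetical walk-through of the new split for one SDT argument:
"8@-48(%r18)" is cut at the '@', size 8 selects the ":u64" suffix, and
arch_sdt_arg_parse_op() rewrites the operand, so the synthesized probe
argument becomes "arg1=-48(%gpr18):u64". A toy version handling only the
unsigned sizes shown in type_to_suffix[] above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* second row of type_to_suffix[], i.e. sizes 0..8 */
static const char * const unsigned_suffix[] = {
        "", ":u8", ":u16", "", ":u32", "", "", "", ":u64"
};

int main(void)
{
        char desc[] = "8@-48(%r18)";    /* one SDT argument, N@OP form */
        const char *suffix = "";
        char *op = strchr(desc, '@');
        long type_idx;

        if (op) {
                *op++ = '\0';           /* desc = "8", op = "-48(%r18)" */
                type_idx = strtol(desc, NULL, 10);
                if (type_idx >= 0 && type_idx <= 8)
                        suffix = unsigned_suffix[type_idx];
        } else {
                op = desc;              /* plain OP, no size */
        }

        /* arch_sdt_arg_parse_op() would turn "-48(%r18)" into "-48(%gpr18)" */
        printf(" arg1=%s%s\n", op, suffix);
        return 0;
}
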
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 67531f4..6a1ad58 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,22 +1,23 @@
LIBDIR := ../../../lib
-BPFOBJ := $(LIBDIR)/bpf/bpf.o
+BPFDIR := $(LIBDIR)/bpf
-CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ)
+CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
+LDLIBS += -lcap
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
TEST_PROGS := test_kmod.sh
-all: $(TEST_GEN_PROGS)
+include ../lib.mk
-.PHONY: all clean force
+BPFOBJ := $(OUTPUT)/bpf.o
+
+$(TEST_GEN_PROGS): $(BPFOBJ)
+
+.PHONY: force
# force a rebuild of BPFOBJ when its dependencies are updated
force:
$(BPFOBJ): force
- $(MAKE) -C $(dir $(BPFOBJ))
-
-$(test_objs): $(BPFOBJ)
-
-include ../lib.mk
+ $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index cada17a..a0aa200 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data)
assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
key = 2;
assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
- key = 1;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
+ key = 3;
+ assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
+ errno == E2BIG);
/* Check that key = 0 doesn't exist. */
key = 0;
@@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data)
close(fd);
}
+static void test_hashmap_sizes(int task, void *data)
+{
+ int fd, i, j;
+
+ for (i = 1; i <= 512; i <<= 1)
+ for (j = 1; j <= 1 << 18; j <<= 1) {
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
+ 2, map_flags);
+ if (fd < 0) {
+ printf("Failed to create hashmap key=%d value=%d '%s'\n",
+ i, j, strerror(errno));
+ exit(1);
+ }
+ close(fd);
+ usleep(10); /* give kernel time to destroy */
+ }
+}
+
static void test_hashmap_percpu(int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
@@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data)
static void test_arraymap_percpu_many_keys(void)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
- unsigned int nr_keys = 20000;
+ /* nr_keys is kept small, otherwise the test stresses the percpu
+ * allocator more than anything else
+ */
+ unsigned int nr_keys = 2000;
long values[nr_cpus];
int key, fd, i;
@@ -419,6 +441,7 @@ static void test_map_stress(void)
{
run_parallel(100, test_hashmap, NULL);
run_parallel(100, test_hashmap_percpu, NULL);
+ run_parallel(100, test_hashmap_sizes, NULL);
run_parallel(100, test_arraymap, NULL);
run_parallel(100, test_arraymap_percpu, NULL);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786d..4d28a9d 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- kvm->buses[bus_idx]->ioeventfd_count--;
+ if (kvm->buses[bus_idx])
+ kvm->buses[bus_idx]->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a17d787..88257b31 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- for (i = 0; i < KVM_NR_BUSES; i++)
- kvm_io_bus_destroy(kvm->buses[i]);
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ if (kvm->buses[i])
+ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm->buses[i] = NULL;
+ }
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
* changes) is disallowed above, so any other attribute changes getting
* here can be skipped.
*/
- if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+ if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
r = kvm_iommu_map_pages(kvm, &new);
return r;
}
@@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
@@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
/* First try the device referenced by cookie. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
@@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
+ if (!bus)
+ return -ENOMEM;
+
/* exclude ioeventfd which is limited by maximum fd */
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
@@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
}
/* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int i, r;
+ int i;
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
- r = -ENOENT;
+ if (!bus)
+ return;
+
for (i = 0; i < bus->dev_count; i++)
if (bus->range[i].dev == dev) {
- r = 0;
break;
}
- if (r)
- return r;
+ if (i == bus->dev_count)
+ return;
new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
- if (!new_bus)
- return -ENOMEM;
+ if (!new_bus) {
+ pr_err("kvm: failed to shrink bus, removing it completely\n");
+ goto broken;
+ }
memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
new_bus->dev_count--;
memcpy(new_bus->range + i, bus->range + i + 1,
(new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+broken:
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
- return r;
+ return;
}
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
srcu_idx = srcu_read_lock(&kvm->srcu);
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ if (!bus)
+ goto out_unlock;
dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
if (dev_idx < 0)