Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging
Pull misc fixes from Guenter Roeck.
These are fixes for regressions that Guenter has been reporting, and
the maintainers haven't picked up and sent in. With rc6 fairly imminent,
I'm taking them directly from Guenter.
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging:
apparmor: fix policy_unpack_test on big endian systems
Revert "MIPS: csrc-r4k: Apply verification clocksource flags"
microblaze: don't treat zero reserved memory regions as error
diff --git a/.mailmap b/.mailmap
index 8ee01d9..caf46a6 100644
--- a/.mailmap
+++ b/.mailmap
@@ -354,6 +354,8 @@
Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
Kishon Vijay Abraham I <kishon@kernel.org> <kishon@ti.com>
+Konrad Dybcio <konradybcio@kernel.org> <konrad.dybcio@linaro.org>
+Konrad Dybcio <konradybcio@kernel.org> <konrad.dybcio@somainline.org>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
@@ -614,6 +616,7 @@
Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml b/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
index 245e8c3..b14e6f3 100644
--- a/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
+++ b/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
@@ -10,7 +10,7 @@
- Fabio Estevam <festevam@gmail.com>
allOf:
- - $ref: usb-hcd.yaml#
+ - $ref: usb-device.yaml#
properties:
compatible:
@@ -36,6 +36,13 @@
- compatible
- reg
+patternProperties:
+ "^.*@[0-9a-f]{1,2}$":
+ description: The hard wired USB devices
+ type: object
+ $ref: /schemas/usb/usb-device.yaml
+ additionalProperties: true
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 04f6aa3..8e30c8f 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -629,18 +629,6 @@
* with beginning and ending almost-blank lines.
*/
-For files in net/ and drivers/net/ the preferred style for long (multi-line)
-comments is a little different.
-
-.. code-block:: c
-
- /* The preferred comment style for files in net/ and drivers/net
- * looks like this.
- *
- * It is nearly the same as the generally preferred comment style,
- * but there is no initial almost-blank line.
- */
-
It's also important to comment data, whether they are basic types or derived
types. To this end, use just one data declaration per line (no commas for
multiple data declarations). This leaves you room for a small comment on each
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index fe86163..30d24ee 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -355,23 +355,6 @@
with better review coverage. Re-posting large series also increases the mailing
list traffic.
-Multi-line comments
-~~~~~~~~~~~~~~~~~~~
-
-Comment style convention is slightly different for networking and most of
-the tree. Instead of this::
-
- /*
- * foobar blah blah blah
- * another line of text
- */
-
-it is requested that you make it look like this::
-
- /* foobar blah blah blah
- * another line of text
- */
-
Local variable ordering ("reverse xmas tree", "RCS")
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/MAINTAINERS b/MAINTAINERS
index 878dcd2..fe83ba7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1880,6 +1880,10 @@
F: drivers/iommu/arm/
F: drivers/iommu/io-pgtable-arm*
+ARM SMMU SVA SUPPORT
+R: Jean-Philippe Brucker <jean-philippe@linaro.org>
+F: drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -2535,8 +2539,7 @@
S: Supported
W: http://www.linux4sam.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git
-F: arch/arm/boot/dts/microchip/at91*
-F: arch/arm/boot/dts/microchip/sama*
+F: arch/arm/boot/dts/microchip/
F: arch/arm/include/debug/at91.S
F: arch/arm/mach-at91/
F: drivers/memory/atmel*
@@ -2745,7 +2748,7 @@
ARM/QUALCOMM SUPPORT
M: Bjorn Andersson <andersson@kernel.org>
-M: Konrad Dybcio <konrad.dybcio@linaro.org>
+M: Konrad Dybcio <konradybcio@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
@@ -7108,7 +7111,7 @@
DRM DRIVER for Qualcomm Adreno GPUs
M: Rob Clark <robdclark@gmail.com>
R: Sean Paul <sean@poorly.run>
-R: Konrad Dybcio <konrad.dybcio@linaro.org>
+R: Konrad Dybcio <konradybcio@kernel.org>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
@@ -12167,7 +12170,7 @@
M: Chuck Lever <chuck.lever@oracle.com>
M: Jeff Layton <jlayton@kernel.org>
R: Neil Brown <neilb@suse.de>
-R: Olga Kornievskaia <kolga@netapp.com>
+R: Olga Kornievskaia <okorniev@redhat.com>
R: Dai Ngo <Dai.Ngo@oracle.com>
R: Tom Talpey <tom@talpey.com>
L: linux-nfs@vger.kernel.org
@@ -17435,6 +17438,7 @@
L: linuxppc-dev@lists.ozlabs.org
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: imx@lists.linux.dev
S: Maintained
F: drivers/pci/controller/dwc/*layerscape*
@@ -17461,6 +17465,7 @@
M: Lucas Stach <l.stach@pengutronix.de>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: imx@lists.linux.dev
S: Maintained
F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-common.yaml
F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie-ep.yaml
@@ -17639,6 +17644,7 @@
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Krzysztof Wilczyński <kw@linux.com>
+R: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Rob Herring <robh@kernel.org>
L: linux-pci@vger.kernel.org
S: Supported
@@ -18545,7 +18551,6 @@
QCOM AUDIO (ASoC) DRIVERS
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
-M: Banajit Goswami <bgoswami@quicinc.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linux-arm-msm@vger.kernel.org
S: Supported
@@ -18794,7 +18799,7 @@
QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
M: Bjorn Andersson <andersson@kernel.org>
-M: Konrad Dybcio <konrad.dybcio@linaro.org>
+M: Konrad Dybcio <konradybcio@kernel.org>
L: linux-pm@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -23843,10 +23848,8 @@
F: include/uapi/linux/uvcvideo.h
USB WEBCAM GADGET
-M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
-M: Daniel Scally <dan.scally@ideasonboard.com>
L: linux-usb@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/usb/gadget/function/*uvc*
F: drivers/usb/gadget/legacy/webcam.c
F: include/uapi/linux/usb/g_uvc.h
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi
index 52a0f6e..bcf4d9c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp43-common.dtsi
@@ -274,24 +274,24 @@
led@0 {
chan-name = "R";
- led-cur = /bits/ 8 <0x20>;
- max-cur = /bits/ 8 <0x60>;
+ led-cur = /bits/ 8 <0x6e>;
+ max-cur = /bits/ 8 <0xc8>;
reg = <0>;
color = <LED_COLOR_ID_RED>;
};
led@1 {
chan-name = "G";
- led-cur = /bits/ 8 <0x20>;
- max-cur = /bits/ 8 <0x60>;
+ led-cur = /bits/ 8 <0xbe>;
+ max-cur = /bits/ 8 <0xc8>;
reg = <1>;
color = <LED_COLOR_ID_GREEN>;
};
led@2 {
chan-name = "B";
- led-cur = /bits/ 8 <0x20>;
- max-cur = /bits/ 8 <0x60>;
+ led-cur = /bits/ 8 <0xbe>;
+ max-cur = /bits/ 8 <0xc8>;
reg = <2>;
color = <LED_COLOR_ID_BLUE>;
};
diff --git a/arch/arm/boot/dts/ti/omap/omap3-n900.dts b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
index 07c5b96..4bde334 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-n900.dts
+++ b/arch/arm/boot/dts/ti/omap/omap3-n900.dts
@@ -781,7 +781,7 @@
mount-matrix = "-1", "0", "0",
"0", "1", "0",
- "0", "0", "1";
+ "0", "0", "-1";
};
cam1: camera@3e {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 6b6e3ee..acf2933 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -175,7 +175,7 @@
};
};
- core-cluster-thermal {
+ cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 17f4e31..ab4c919 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -214,7 +214,7 @@
};
};
- core-cluster-thermal {
+ cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 3>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 200e526..5501986 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -182,7 +182,7 @@
};
};
- core-cluster-thermal {
+ cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 3>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
index 8ce4b6a..e3a7db2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -131,7 +131,7 @@
};
thermal-zones {
- core-cluster-thermal {
+ cluster-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index bde89de..1b306d6 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -122,7 +122,7 @@
};
};
- core-cluster1-thermal {
+ cluster1-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 4>;
@@ -151,7 +151,7 @@
};
};
- core-cluster2-thermal {
+ cluster2-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 5>;
@@ -180,7 +180,7 @@
};
};
- core-cluster3-thermal {
+ cluster3-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 6>;
@@ -209,7 +209,7 @@
};
};
- core-cluster4-thermal {
+ cluster4-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 7>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index 26c7ca3..bd75a65 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -492,7 +492,7 @@
};
};
- ddr-cluster5-thermal {
+ ddr-ctrl5-thermal {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
index bf3e046..353ace3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs232.dtso
@@ -21,7 +21,7 @@
&gpio3 {
pinctrl-names = "default";
- pinctrcl-0 = <&pinctrl_gpio3_hog>;
+ pinctrl-0 = <&pinctrl_gpio3_hog>;
uart4_rs485_en {
gpio-hog;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
index f4448cd..8a75d67 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l-rs232-rs485.dtso
@@ -22,7 +22,7 @@
&gpio3 {
pinctrl-names = "default";
- pinctrcl-0 = <&pinctrl_gpio3_hog>;
+ pinctrl-0 = <&pinctrl_gpio3_hog>;
uart4_rs485_en {
gpio-hog;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index 17e2c19..cc9b81d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
@@ -211,13 +211,12 @@
simple-audio-card,cpu {
sound-dai = <&sai3>;
+ frame-master;
+ bitclock-master;
};
simple-audio-card,codec {
sound-dai = <&wm8962>;
- clocks = <&clk IMX8MP_CLK_IPP_DO_CLKO1>;
- frame-master;
- bitclock-master;
};
};
};
@@ -507,10 +506,9 @@
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
- assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
- <&clk IMX8MP_AUDIO_PLL2> ;
- assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
- assigned-clock-rates = <12288000>, <361267200>;
+ assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
+ assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <12288000>;
fsl,sai-mclk-direction-output;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index da8f19a..e2ee9f5 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -499,7 +499,7 @@
pinctrl-0 = <&pinctrl_usdhc2_hs>, <&pinctrl_usdhc2_gpio>;
pinctrl-1 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
pinctrl-2 = <&pinctrl_usdhc2_uhs>, <&pinctrl_usdhc2_gpio>;
- cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
+ cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
no-sdio;
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
index edbd8ca..72a9a5d 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
@@ -19,7 +19,7 @@
linux,cma {
compatible = "shared-dma-pool";
reusable;
- alloc-ranges = <0 0x60000000 0 0x40000000>;
+ alloc-ranges = <0 0x80000000 0 0x40000000>;
size = <0 0x10000000>;
linux,cma-default;
};
@@ -156,6 +156,7 @@
&wdog3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index 4a3f4235..a099302 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -1105,7 +1105,7 @@
<&clk IMX93_CLK_SYS_PLL_PFD0_DIV2>;
assigned-clock-rates = <100000000>, <250000000>;
intf_mode = <&wakeupmix_gpr 0x28>;
- snps,clk-csr = <0>;
+ snps,clk-csr = <6>;
nvmem-cells = <&eth_mac2>;
nvmem-cell-names = "mac-address";
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 1bbf9a0..425272a 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -27,7 +27,7 @@
reg = <0x0>;
enable-method = "psci";
#cooling-cells = <2>;
- power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domains = <&scmi_perf IMX95_PERF_A55>;
power-domain-names = "perf";
i-cache-size = <32768>;
i-cache-line-size = <64>;
@@ -44,7 +44,7 @@
reg = <0x100>;
enable-method = "psci";
#cooling-cells = <2>;
- power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domains = <&scmi_perf IMX95_PERF_A55>;
power-domain-names = "perf";
i-cache-size = <32768>;
i-cache-line-size = <64>;
@@ -61,7 +61,7 @@
reg = <0x200>;
enable-method = "psci";
#cooling-cells = <2>;
- power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domains = <&scmi_perf IMX95_PERF_A55>;
power-domain-names = "perf";
i-cache-size = <32768>;
i-cache-line-size = <64>;
@@ -78,7 +78,7 @@
reg = <0x300>;
enable-method = "psci";
#cooling-cells = <2>;
- power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domains = <&scmi_perf IMX95_PERF_A55>;
power-domain-names = "perf";
i-cache-size = <32768>;
i-cache-line-size = <64>;
@@ -93,7 +93,7 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x400>;
- power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domains = <&scmi_perf IMX95_PERF_A55>;
power-domain-names = "perf";
enable-method = "psci";
#cooling-cells = <2>;
@@ -110,7 +110,7 @@
device_type = "cpu";
compatible = "arm,cortex-a55";
reg = <0x500>;
- power-domains = <&scmi_devpd IMX95_PERF_A55>;
+ power-domains = <&scmi_perf IMX95_PERF_A55>;
power-domain-names = "perf";
enable-method = "psci";
#cooling-cells = <2>;
@@ -187,7 +187,7 @@
compatible = "cache";
cache-size = <524288>;
cache-line-size = <64>;
- cache-sets = <1024>;
+ cache-sets = <512>;
cache-level = <3>;
cache-unified;
};
diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
index 57365658..0a74ed4 100644
--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
@@ -320,8 +320,8 @@
reg = <0x08af8800 0x400>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 53 IRQ_TYPE_EDGE_BOTH>,
- <GIC_SPI 52 IRQ_TYPE_EDGE_BOTH>;
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pwr_event",
"dp_hs_phy_irq",
"dm_hs_phy_irq";
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
index 7fb980f..9caa14d 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-asus-vivobook-s15.dts
@@ -278,6 +278,13 @@
vdd-l3-supply = <&vreg_s1f_0p7>;
vdd-s1-supply = <&vph_pwr>;
vdd-s2-supply = <&vph_pwr>;
+
+ vreg_l3i_0p8: ldo3 {
+ regulator-name = "vreg_l3i_0p8";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
};
regulators-7 {
@@ -423,11 +430,17 @@
};
&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
status = "okay";
};
&pcie4_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-phy-supply = <&vreg_l3i_0p8>;
vdda-pll-supply = <&vreg_l3e_1p2>;
status = "okay";
@@ -517,7 +530,30 @@
bias-disable;
};
- pcie6a_default: pcie2a-default-state {
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
clkreq-n-pins {
pins = "gpio153";
function = "pcie6a_clk";
@@ -529,7 +565,7 @@
pins = "gpio152";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index 6152bcd..e17ab82 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -268,7 +268,6 @@
pinctrl-0 = <&edp_reg_en>;
pinctrl-names = "default";
- regulator-always-on;
regulator-boot-on;
};
@@ -637,6 +636,14 @@
};
};
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/gen70500_zap.mbn";
+ };
+};
+
&i2c0 {
clock-frequency = <400000>;
@@ -724,9 +731,13 @@
aux-bus {
panel {
- compatible = "edp-panel";
+ compatible = "samsung,atna45af01", "samsung,atna33xc20";
+ enable-gpios = <&pmc8380_3_gpios 4 GPIO_ACTIVE_HIGH>;
power-supply = <&vreg_edp_3p3>;
+ pinctrl-0 = <&edp_bl_en>;
+ pinctrl-names = "default";
+
port {
edp_panel_in: endpoint {
remote-endpoint = <&mdss_dp3_out>;
@@ -756,11 +767,17 @@
};
&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
status = "okay";
};
&pcie4_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-phy-supply = <&vreg_l3i_0p8>;
vdda-pll-supply = <&vreg_l3e_1p2>;
status = "okay";
@@ -785,6 +802,16 @@
status = "okay";
};
+&pmc8380_3_gpios {
+ edp_bl_en: edp-bl-en-state {
+ pins = "gpio4";
+ function = "normal";
+ power-source = <1>; /* 1.8V */
+ input-disable;
+ output-enable;
+ };
+};
+
&qupv3_0 {
status = "okay";
};
@@ -931,7 +958,30 @@
bias-disable;
};
- pcie6a_default: pcie2a-default-state {
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
clkreq-n-pins {
pins = "gpio153";
function = "pcie6a_clk";
@@ -943,15 +993,15 @@
pins = "gpio152";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
- pins = "gpio154";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
tpad_default: tpad-default-state {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
index fbff558..1943bdb 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
@@ -625,16 +625,31 @@
};
&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
status = "okay";
};
&pcie4_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-phy-supply = <&vreg_l3i_0p8>;
vdda-pll-supply = <&vreg_l3e_1p2>;
status = "okay";
};
+&pcie4_port0 {
+ wifi@0 {
+ compatible = "pci17cb,1107";
+ reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+ qcom,ath12k-calibration-variant = "LES790";
+ };
+};
+
&pcie6a {
perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
@@ -782,7 +797,30 @@
bias-disable;
};
- pcie6a_default: pcie2a-default-state {
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
clkreq-n-pins {
pins = "gpio153";
function = "pcie6a_clk";
@@ -794,15 +832,15 @@
pins = "gpio152";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
- pins = "gpio154";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
tpad_default: tpad-default-state {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index 72a4f41..8098e67 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -606,6 +606,14 @@
};
};
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ firmware-name = "qcom/x1e80100/gen70500_zap.mbn";
+ };
+};
+
&lpass_tlmm {
spkr_01_sd_n_active: spkr-01-sd-n-active-state {
pins = "gpio12";
@@ -660,11 +668,17 @@
};
&pcie4 {
+ perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
+ pinctrl-0 = <&pcie4_default>;
+ pinctrl-names = "default";
+
status = "okay";
};
&pcie4_phy {
- vdda-phy-supply = <&vreg_l3j_0p8>;
+ vdda-phy-supply = <&vreg_l3i_0p8>;
vdda-pll-supply = <&vreg_l3e_1p2>;
status = "okay";
@@ -804,7 +818,30 @@
bias-disable;
};
- pcie6a_default: pcie2a-default-state {
+ pcie4_default: pcie4-default-state {
+ clkreq-n-pins {
+ pins = "gpio147";
+ function = "pcie4_clk";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
+ perst-n-pins {
+ pins = "gpio146";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ wake-n-pins {
+ pins = "gpio148";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ pcie6a_default: pcie6a-default-state {
clkreq-n-pins {
pins = "gpio153";
function = "pcie6a_clk";
@@ -816,15 +853,15 @@
pins = "gpio152";
function = "gpio";
drive-strength = <2>;
- bias-pull-down;
+ bias-disable;
};
wake-n-pins {
- pins = "gpio154";
- function = "gpio";
- drive-strength = <2>;
- bias-pull-up;
- };
+ pins = "gpio154";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
wcd_default: wcd-reset-n-active-state {
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 7bca5fc..cd732ef 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -2901,7 +2901,7 @@
dma-coherent;
- linux,pci-domain = <7>;
+ linux,pci-domain = <6>;
num-lanes = <2>;
interrupts = <GIC_SPI 773 IRQ_TYPE_LEVEL_HIGH>,
@@ -2959,6 +2959,7 @@
"link_down";
power-domains = <&gcc GCC_PCIE_6A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie6a_phy>;
phy-names = "pciephy";
@@ -3022,7 +3023,7 @@
dma-coherent;
- linux,pci-domain = <5>;
+ linux,pci-domain = <4>;
num-lanes = <2>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
@@ -3080,11 +3081,22 @@
"link_down";
power-domains = <&gcc GCC_PCIE_4_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
phys = <&pcie4_phy>;
phy-names = "pciephy";
status = "disabled";
+
+ pcie4_port0: pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
};
pcie4_phy: phy@1c0e000 {
@@ -3155,9 +3167,10 @@
interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>;
interconnect-names = "gfx-mem";
+ status = "disabled";
+
zap-shader {
memory-region = <&gpu_microcode_mem>;
- firmware-name = "qcom/gen70500_zap.mbn";
};
gpu_opp_table: opp-table {
@@ -3288,7 +3301,7 @@
reg = <0x0 0x03da0000 0x0 0x40000>;
#iommu-cells = <2>;
#global-interrupts = <1>;
- interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 674 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 679 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 680 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 7d32fca..362df93 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -887,6 +887,7 @@
CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m
CONFIG_DRM_PANEL_NOVATEK_NT36672E=m
CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
+CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m
CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_PANEL_VISIONOX_VTDR6130=m
diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h
deleted file mode 100644
index 75ccd80..0000000
--- a/arch/loongarch/include/asm/dma-direct.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _LOONGARCH_DMA_DIRECT_H
-#define _LOONGARCH_DMA_DIRECT_H
-
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* _LOONGARCH_DMA_DIRECT_H */
diff --git a/arch/loongarch/include/asm/hw_irq.h b/arch/loongarch/include/asm/hw_irq.h
index af4f4e8..8156ffb 100644
--- a/arch/loongarch/include/asm/hw_irq.h
+++ b/arch/loongarch/include/asm/hw_irq.h
@@ -9,6 +9,8 @@
extern atomic_t irq_err_count;
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOPROBE
+
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
* machines, we'll see ...
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index c416cb7..8657008 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -76,7 +76,6 @@
#endif
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
-void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 69a85f2..6ab6401 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -530,6 +530,10 @@
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD _restore_fp
+#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD _restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD _restore_lasx
#endif
+#endif
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
index f4991c0..adac8fc 100644
--- a/arch/loongarch/kernel/irq.c
+++ b/arch/loongarch/kernel/irq.c
@@ -102,9 +102,6 @@
mp_ops.init_ipi();
#endif
- for (i = 0; i < NR_IRQS; i++)
- irq_set_noprobe(i);
-
for_each_possible_cpu(i) {
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 80e9889..0c292f8 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -277,6 +277,10 @@
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
+#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
+#endif
diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c
index bcc6b6d..74a4b5c 100644
--- a/arch/loongarch/kvm/timer.c
+++ b/arch/loongarch/kvm/timer.c
@@ -188,10 +188,3 @@
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
preempt_enable();
}
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
- write_gcsr_timercfg(0);
- kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
- hrtimer_cancel(&vcpu->arch.swtimer);
-}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 16756ff..6905283 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -647,7 +647,7 @@
vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
break;
case KVM_REG_LOONGARCH_VCPU_RESET:
- kvm_reset_timer(vcpu);
+ vcpu->arch.st.guest_addr = 0;
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
break;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 83eb776..4c9f20a 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -174,7 +174,7 @@
* on an I/O error, in which case we'll turn any error into
* "not supported" here.
*/
- if (ret && !limit)
+ if (ret && !bdev_write_zeroes_sectors(bdev))
return -EOPNOTSUPP;
return ret;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 7d5e4de..1ccbb515 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <asm/unaligned.h>
+#include <linux/efi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -26,6 +27,8 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
+#define BTINTEL_EFI_DSBR L"UefiCnvCommonDSBR"
+
enum {
DSM_SET_WDISABLE2_DELAY = 1,
DSM_SET_RESET_METHOD = 3,
@@ -2616,6 +2619,120 @@
return hci_skb_pkt_type(skb);
}
+/*
+ * UefiCnvCommonDSBR UEFI variable provides information from the OEM platforms
+ * if they have replaced the BRI (Bluetooth Radio Interface) resistor to
+ * overcome the potential STEP errors on their designs. Based on the
+ * configauration, bluetooth firmware shall adjust the BRI response line drive
+ * strength. The below structure represents DSBR data.
+ * struct {
+ * u8 header;
+ * u32 dsbr;
+ * } __packed;
+ *
+ * header - defines revision number of the structure
+ * dsbr - defines drive strength BRI response
+ * bit0
+ * 0 - instructs bluetooth firmware to use default values
+ * 1 - instructs bluetooth firmware to override default values
+ * bit3:1
+ * Reserved
+ * bit7:4
+ * DSBR override values (only if bit0 is set. Default value is 0xF
+ * bit31:7
+ * Reserved
+ * Expected values for dsbr field:
+ * 1. 0xF1 - indicates that the resistor on board is 33 Ohm
+ * 2. 0x00 or 0xB1 - indicates that the resistor on board is 10 Ohm
+ * 3. Non existing UEFI variable or invalid (none of the above) - indicates
+ * that the resistor on board is 10 Ohm
+ * Even if uefi variable is not present, driver shall send 0xfc0a command to
+ * firmware to use default values.
+ *
+ */
+static int btintel_uefi_get_dsbr(u32 *dsbr_var)
+{
+ struct btintel_dsbr {
+ u8 header;
+ u32 dsbr;
+ } __packed data;
+
+ efi_status_t status;
+ unsigned long data_size = 0;
+ efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
+ 0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
+
+ if (!IS_ENABLED(CONFIG_EFI))
+ return -EOPNOTSUPP;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return -EOPNOTSUPP;
+
+ status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+ NULL);
+
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size)
+ return -EIO;
+
+ status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+ &data);
+
+ if (status != EFI_SUCCESS)
+ return -ENXIO;
+
+ *dsbr_var = data.dsbr;
+ return 0;
+}
+
+static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ struct btintel_dsbr_cmd {
+ u8 enable;
+ u8 dsbr;
+ } __packed;
+
+ struct btintel_dsbr_cmd cmd;
+ struct sk_buff *skb;
+ u8 status;
+ u32 dsbr;
+ bool apply_dsbr;
+ int err;
+
+ /* DSBR command needs to be sent for BlazarI + B0 step product after
+ * downloading IML image.
+ */
+ apply_dsbr = (ver->img_type == BTINTEL_IMG_IML &&
+ ((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) &&
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01);
+
+ if (!apply_dsbr)
+ return 0;
+
+ dsbr = 0;
+ err = btintel_uefi_get_dsbr(&dsbr);
+ if (err < 0)
+ bt_dev_dbg(hdev, "Error reading efi: %ls (%d)",
+ BTINTEL_EFI_DSBR, err);
+
+ cmd.enable = dsbr & BIT(0);
+ cmd.dsbr = dsbr >> 4 & 0xF;
+
+ bt_dev_info(hdev, "dsbr: enable: 0x%2.2x value: 0x%2.2x", cmd.enable,
+ cmd.dsbr);
+
+ skb = __hci_cmd_sync(hdev, 0xfc0a, sizeof(cmd), &cmd, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return -bt_to_errno(PTR_ERR(skb));
+
+ status = skb->data[0];
+ kfree_skb(skb);
+
+ if (status)
+ return -bt_to_errno(status);
+
+ return 0;
+}
+
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2650,6 +2767,13 @@
if (err)
return err;
+ /* set drive strength of BRI response */
+ err = btintel_set_dsbr(hdev, ver);
+ if (err) {
+ bt_dev_err(hdev, "Failed to send dsbr command (%d)", err);
+ return err;
+ }
+
/* If image type returned is BTINTEL_IMG_IML, then controller supports
* intermediate loader image
*/
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 31d3dd9..ad1ec6f 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -449,6 +449,23 @@
return false;
}
+static void ps_cleanup(struct btnxpuart_dev *nxpdev)
+{
+ struct ps_data *psdata = &nxpdev->psdata;
+ u8 ps_state;
+
+ mutex_lock(&psdata->ps_lock);
+ ps_state = psdata->ps_state;
+ mutex_unlock(&psdata->ps_lock);
+
+ if (ps_state != PS_STATE_AWAKE)
+ ps_control(psdata->hdev, PS_STATE_AWAKE);
+
+ ps_cancel_timer(nxpdev);
+ cancel_work_sync(&psdata->work);
+ mutex_destroy(&psdata->ps_lock);
+}
+
static int send_ps_cmd(struct hci_dev *hdev, void *data)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1363,7 +1380,6 @@
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
- ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
skb_queue_purge(&nxpdev->txq);
if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
@@ -1516,8 +1532,8 @@
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}
- ps_cancel_timer(nxpdev);
}
+ ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index d3989b2..1e5b107 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -698,6 +698,10 @@
rc = tpm2_get_cc_attrs_tbl(chip);
if (rc)
goto init_irq_cleanup;
+
+ rc = tpm2_sessions_init(chip);
+ if (rc)
+ goto init_irq_cleanup;
}
return tpm_chip_register(chip);
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 66b73c3..b731866 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -160,14 +160,17 @@
lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
}
- if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
- (nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
+ if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+ pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
+ __func__, cpu, highest_perf, cpudata->highest_perf);
+ goto skip_test;
+ }
+ if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
(lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
(lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, highest_perf, cpudata->highest_perf,
- nominal_perf, cpudata->nominal_perf,
+ pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
+ __func__, cpu, nominal_perf, cpudata->nominal_perf,
lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
lowest_perf, cpudata->lowest_perf);
goto skip_test;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 68c616b..89bda7a 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -321,7 +321,7 @@
return 0;
for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_die_id(cpu);
+ unsigned long logical_id = topology_logical_package_id(cpu);
if (test_bit(logical_id, &logical_proc_id_mask))
continue;
@@ -692,7 +692,7 @@
struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
u32 highest_perf, nominal_perf, nominal_freq, max_freq;
- int ret;
+ int ret = 0;
highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 10e8f07..e3f8db4 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -17,8 +17,8 @@
DW_HDMA_V0_CB = BIT(0),
DW_HDMA_V0_TCB = BIT(1),
DW_HDMA_V0_LLP = BIT(2),
- DW_HDMA_V0_LIE = BIT(3),
- DW_HDMA_V0_RIE = BIT(4),
+ DW_HDMA_V0_LWIE = BIT(3),
+ DW_HDMA_V0_RWIE = BIT(4),
DW_HDMA_V0_CCS = BIT(8),
DW_HDMA_V0_LLE = BIT(9),
};
@@ -195,25 +195,14 @@
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
- struct dw_edma_chan *chan = chunk->chan;
u32 control = 0, i = 0;
- int j;
if (chunk->cb)
control = DW_HDMA_V0_CB;
- j = chunk->bursts_alloc;
- list_for_each_entry(child, &chunk->burst->list, list) {
- j--;
- if (!j) {
- control |= DW_HDMA_V0_LIE;
- if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
- control |= DW_HDMA_V0_RIE;
- }
-
+ list_for_each_entry(child, &chunk->burst->list, list)
dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
child->sar, child->dar);
- }
control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
if (!chunk->cb)
@@ -247,10 +236,11 @@
if (first) {
/* Enable engine */
SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
- /* Interrupt enable&unmask - done, abort */
- tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
- HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ /* Interrupt unmask - stop, abort */
+ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
+ tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
+ /* Interrupt enable - stop, abort */
+ tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5f7d690..dd75f97 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -621,12 +622,10 @@
struct dw_desc *prev;
struct dw_desc *first;
u32 ctllo, ctlhi;
- u8 m_master = dwc->dws.m_master;
- u8 lms = DWC_LLP_LMS(m_master);
+ u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
dma_addr_t reg;
unsigned int reg_width;
unsigned int mem_width;
- unsigned int data_width = dw->pdata->data_width[m_master];
unsigned int i;
struct scatterlist *sg;
size_t total_len = 0;
@@ -660,7 +659,7 @@
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = __ffs(data_width | mem | len);
+ mem_width = __ffs(sconfig->src_addr_width | mem | len);
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -720,7 +719,7 @@
lli_write(desc, sar, reg);
lli_write(desc, dar, mem);
lli_write(desc, ctlhi, ctlhi);
- mem_width = __ffs(data_width | mem);
+ mem_width = __ffs(sconfig->dst_addr_width | mem);
lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
desc->len = dlen;
@@ -780,20 +779,108 @@
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
-static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+static int dwc_verify_maxburst(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+
+ dwc->dma_sconfig.src_maxburst =
+ clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
+ dwc->dma_sconfig.dst_maxburst =
+ clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);
+
+ dwc->dma_sconfig.src_maxburst =
+ rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
+ dwc->dma_sconfig.dst_maxburst =
+ rounddown_pow_of_two(dwc->dma_sconfig.dst_maxburst);
+
+ return 0;
+}
+
+static int dwc_verify_p_buswidth(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
+ u32 reg_width, max_width;
+
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ reg_width = dwc->dma_sconfig.dst_addr_width;
+ else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
+ reg_width = dwc->dma_sconfig.src_addr_width;
+ else /* DMA_MEM_TO_MEM */
+ return 0;
+
+ max_width = dw->pdata->data_width[dwc->dws.p_master];
+
+ /* Fall-back to 1-byte transfer width if undefined */
+ if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (!is_power_of_2(reg_width) || reg_width > max_width)
+ return -EINVAL;
+ else /* bus width is valid */
+ return 0;
+
+ /* Update undefined addr width value */
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
+ dwc->dma_sconfig.dst_addr_width = reg_width;
+ else /* DMA_DEV_TO_MEM */
+ dwc->dma_sconfig.src_addr_width = reg_width;
+
+ return 0;
+}
+
+static int dwc_verify_m_buswidth(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ u32 reg_width, reg_burst, mem_width;
+
+ mem_width = dw->pdata->data_width[dwc->dws.m_master];
+
+ /*
+ * It's possible to have a data portion locked in the DMA FIFO in case
+ * of the channel suspension. Subsequent channel disabling will cause
+ * that data silent loss. In order to prevent that maintain the src and
+ * dst transfer widths coherency by means of the relation:
+ * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
+ * Look for the details in the commit message that brings this change.
+ *
+ * Note the DMA configs utilized in the calculations below must have
+ * been verified to have correct values by this method call.
+ */
+ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
+ reg_width = dwc->dma_sconfig.dst_addr_width;
+ if (mem_width < reg_width)
+ return -EINVAL;
+
+ dwc->dma_sconfig.src_addr_width = mem_width;
+ } else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
+ reg_width = dwc->dma_sconfig.src_addr_width;
+ reg_burst = dwc->dma_sconfig.src_maxburst;
+
+ dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
+ }
+
+ return 0;
+}
+
+static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ int ret;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
- dwc->dma_sconfig.src_maxburst =
- clamp(dwc->dma_sconfig.src_maxburst, 0U, dwc->max_burst);
- dwc->dma_sconfig.dst_maxburst =
- clamp(dwc->dma_sconfig.dst_maxburst, 0U, dwc->max_burst);
+ ret = dwc_verify_maxburst(chan);
+ if (ret)
+ return ret;
- dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
- dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
+ ret = dwc_verify_p_buswidth(chan);
+ if (ret)
+ return ret;
+
+ ret = dwc_verify_m_buswidth(chan);
+ if (ret)
+ return ret;
return 0;
}
@@ -1068,7 +1155,7 @@
bool autocfg = false;
unsigned int dw_params;
unsigned int i;
- int err;
+ int ret;
dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
if (!dw->pdata)
@@ -1084,7 +1171,7 @@
autocfg = dw_params >> DW_PARAMS_EN & 1;
if (!autocfg) {
- err = -EINVAL;
+ ret = -EINVAL;
goto err_pdata;
}
@@ -1104,7 +1191,7 @@
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
- err = -EINVAL;
+ ret = -EINVAL;
goto err_pdata;
} else {
memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
@@ -1116,7 +1203,7 @@
dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
GFP_KERNEL);
if (!dw->chan) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_pdata;
}
@@ -1134,15 +1221,15 @@
sizeof(struct dw_desc), 4, 0);
if (!dw->desc_pool) {
dev_err(chip->dev, "No memory for descriptors dma pool\n");
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_pdata;
}
tasklet_setup(&dw->tasklet, dw_dma_tasklet);
- err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
+ ret = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
dw->name, dw);
- if (err)
+ if (ret)
goto err_pdata;
INIT_LIST_HEAD(&dw->dma.channels);
@@ -1254,8 +1341,8 @@
*/
dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);
- err = dma_async_device_register(&dw->dma);
- if (err)
+ ret = dma_async_device_register(&dw->dma);
+ if (ret)
goto err_dma_register;
dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
@@ -1269,7 +1356,7 @@
free_irq(chip->irq, dw);
err_pdata:
pm_runtime_put_sync_suspend(chip->dev);
- return err;
+ return ret;
}
int do_dma_remove(struct dw_dma_chip *chip)
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c
index a486226..6766142 100644
--- a/drivers/dma/dw/dw.c
+++ b/drivers/dma/dw/dw.c
@@ -64,28 +64,37 @@
return DWC_CTLH_BLOCK_TS(block) << width;
}
-static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
-{
- struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
- u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
- u8 p_master = dwc->dws.p_master;
- u8 m_master = dwc->dws.m_master;
- u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master;
- u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master;
-
- return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
- DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
- DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
-}
-
-static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
+static inline u8 dw_dma_encode_maxburst(u32 maxburst)
{
/*
* Fix burst size according to dw_dmac. We need to convert them as:
* 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
*/
- *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0;
+ return maxburst > 1 ? fls(maxburst) - 2 : 0;
+}
+
+static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc)
+{
+ struct dma_slave_config *sconfig = &dwc->dma_sconfig;
+ u8 smsize = 0, dmsize = 0;
+ u8 sms, dms;
+
+ if (dwc->direction == DMA_MEM_TO_DEV) {
+ sms = dwc->dws.m_master;
+ dms = dwc->dws.p_master;
+ dmsize = dw_dma_encode_maxburst(sconfig->dst_maxburst);
+ } else if (dwc->direction == DMA_DEV_TO_MEM) {
+ sms = dwc->dws.p_master;
+ dms = dwc->dws.m_master;
+ smsize = dw_dma_encode_maxburst(sconfig->src_maxburst);
+ } else /* DMA_MEM_TO_MEM */ {
+ sms = dwc->dws.m_master;
+ dms = dwc->dws.m_master;
+ }
+
+ return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
+ DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) |
+ DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms);
}
static void dw_dma_set_device_name(struct dw_dma *dw, int id)
@@ -116,7 +125,6 @@
dw->suspend_chan = dw_dma_suspend_chan;
dw->resume_chan = dw_dma_resume_chan;
dw->prepare_ctllo = dw_dma_prepare_ctllo;
- dw->encode_maxburst = dw_dma_encode_maxburst;
dw->bytes2block = dw_dma_bytes2block;
dw->block2bytes = dw_dma_block2bytes;
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c
index 58f4078..dac617c 100644
--- a/drivers/dma/dw/idma32.c
+++ b/drivers/dma/dw/idma32.c
@@ -199,21 +199,25 @@
return IDMA32C_CTLH_BLOCK_TS(block);
}
+static inline u8 idma32_encode_maxburst(u32 maxburst)
+{
+ return maxburst > 1 ? fls(maxburst) - 1 : 0;
+}
+
static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc)
{
struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- u8 smsize = (dwc->direction == DMA_DEV_TO_MEM) ? sconfig->src_maxburst : 0;
- u8 dmsize = (dwc->direction == DMA_MEM_TO_DEV) ? sconfig->dst_maxburst : 0;
+ u8 smsize = 0, dmsize = 0;
+
+ if (dwc->direction == DMA_MEM_TO_DEV)
+ dmsize = idma32_encode_maxburst(sconfig->dst_maxburst);
+ else if (dwc->direction == DMA_DEV_TO_MEM)
+ smsize = idma32_encode_maxburst(sconfig->src_maxburst);
return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN |
DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize);
}
-static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst)
-{
- *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0;
-}
-
static void idma32_set_device_name(struct dw_dma *dw, int id)
{
snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id);
@@ -270,7 +274,6 @@
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
- dw->encode_maxburst = idma32_encode_maxburst;
dw->bytes2block = idma32_bytes2block;
dw->block2bytes = idma32_block2bytes;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 7d9d4c9..47c58ad 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -29,7 +29,7 @@
struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
- int err;
+ int ret;
match = device_get_match_data(dev);
if (!match)
@@ -51,9 +51,9 @@
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return err;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
if (!data->pdata)
data->pdata = dev_get_platdata(dev);
@@ -69,14 +69,14 @@
chip->clk = devm_clk_get_optional(chip->dev, "hclk");
if (IS_ERR(chip->clk))
return PTR_ERR(chip->clk);
- err = clk_prepare_enable(chip->clk);
- if (err)
- return err;
+ ret = clk_prepare_enable(chip->clk);
+ if (ret)
+ return ret;
pm_runtime_enable(&pdev->dev);
- err = data->probe(chip);
- if (err)
+ ret = data->probe(chip);
+ if (ret)
goto err_dw_dma_probe;
platform_set_drvdata(pdev, data);
@@ -90,7 +90,7 @@
err_dw_dma_probe:
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
- return err;
+ return ret;
}
static void dw_remove(struct platform_device *pdev)
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 76654bd..5969d9c 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -327,7 +327,6 @@
void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
void (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
u32 (*prepare_ctllo)(struct dw_dma_chan *dwc);
- void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
unsigned int width, size_t *len);
size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 4087e02..0be6e94 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -403,6 +403,7 @@
swdesc = kzalloc(struct_size(swdesc, lli, count), GFP_NOWAIT);
if (!swdesc)
return NULL;
+ swdesc->lli_size = count;
for (i = 0; i < count; i++) {
swdesc->lli[i].hwdesc = dma_pool_zalloc(chan->lli_pool, GFP_NOWAIT,
@@ -410,7 +411,6 @@
if (!swdesc->lli[i].hwdesc)
goto err_pool_free;
}
- swdesc->lli_size = count;
swdesc->ccr = 0;
/* Set LL base address */
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 7e6c04a..6ab9bfb 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1186,10 +1186,10 @@
d->dev_addr = dev_addr;
d->fi = burst;
d->es = es;
+ d->sglen = 1;
d->sg[0].addr = buf_addr;
d->sg[0].en = period_len / es_bytes[es];
d->sg[0].fn = buf_len / period_len;
- d->sglen = 1;
d->ccr = c->ccr;
if (dir == DMA_DEV_TO_MEM)
@@ -1258,10 +1258,10 @@
d->dev_addr = src;
d->fi = 0;
d->es = data_type;
+ d->sglen = 1;
d->sg[0].en = len / BIT(data_type);
d->sg[0].fn = 1;
d->sg[0].addr = dest;
- d->sglen = 1;
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
@@ -1309,6 +1309,7 @@
if (data_type > CSDP_DATA_TYPE_32)
data_type = CSDP_DATA_TYPE_32;
+ d->sglen = 1;
sg = &d->sg[0];
d->dir = DMA_MEM_TO_MEM;
d->dev_addr = xt->src_start;
@@ -1316,7 +1317,6 @@
sg->en = xt->sgl[0].size / BIT(data_type);
sg->fn = xt->numf;
sg->addr = xt->dst_start;
- d->sglen = 1;
d->ccr = c->ccr;
src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 30de4789..9ca5ee5 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -166,7 +166,7 @@
*/
ret = wait_for_completion_timeout(&priv->programming_complete,
msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS));
- if (ret)
+ if (!ret)
return FW_UPLOAD_ERR_TIMEOUT;
return FW_UPLOAD_ERR_NONE;
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index dca5f3f..2b4c282 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -73,7 +73,7 @@
struct arm_smccc_res get_wq_res;
struct arm_smccc_args get_wq_ctx = {0};
- get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index 17948cf..92b3651 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -40,7 +40,6 @@
};
struct qcom_tzmem_chunk {
- phys_addr_t paddr;
size_t size;
struct qcom_tzmem_pool *owner;
};
@@ -78,6 +77,7 @@
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
"qcom,sc8180x",
+ "qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
"qcom,sm8150", /* reset in rmtfs memory assignment */
NULL
@@ -385,7 +385,6 @@
return NULL;
}
- chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr);
chunk->size = size;
chunk->owner = pool;
@@ -431,25 +430,37 @@
EXPORT_SYMBOL_GPL(qcom_tzmem_free);
/**
- * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical.
- * @vaddr: Virtual address of the buffer allocated from a TZ memory pool.
+ * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
+ * @vaddr: Virtual address of memory allocated from a TZ memory pool.
*
- * Can be used in any context. The address must have been returned by a call
- * to qcom_tzmem_alloc().
+ * Can be used in any context. The address must point to memory allocated
+ * using qcom_tzmem_alloc().
*
- * Returns: Physical address of the buffer.
+ * Returns:
+ * Physical address mapped from the virtual or 0 if the mapping failed.
*/
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
struct qcom_tzmem_chunk *chunk;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ phys_addr_t ret;
guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);
- chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr);
- if (!chunk)
- return 0;
+ radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
+ chunk = radix_tree_deref_slot_protected(slot,
+ &qcom_tzmem_chunks_lock);
- return chunk->paddr;
+ ret = gen_pool_virt_to_phys(chunk->owner->genpool,
+ (unsigned long)vaddr);
+ if (ret == -1)
+ continue;
+
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
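With the cached paddr removed, qcom_tzmem_to_phys() now resolves the physical address from the owning pool's genpool at call time and returns 0 when no pool covers the address. A hypothetical caller sketch (everything outside the qcom_tzmem_* API is made up for illustration):

#include <linux/errno.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/gfp.h>

static int example_share_with_tz(struct qcom_tzmem_pool *pool, size_t size)
{
	phys_addr_t phys;
	void *buf;

	buf = qcom_tzmem_alloc(pool, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	phys = qcom_tzmem_to_phys(buf);
	if (!phys) {			/* 0: address could not be resolved */
		qcom_tzmem_free(buf);
		return -EINVAL;
	}

	/* ... pass 'phys' to the secure world here ... */

	qcom_tzmem_free(buf);
	return 0;
}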
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 921f615..02a07d3 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -39,6 +39,8 @@
static DEFINE_MUTEX(disable_lock);
static bool disabled;
+static struct device *sysfb_parent_dev(const struct screen_info *si);
+
static bool sysfb_unregister(void)
{
if (IS_ERR_OR_NULL(pd))
@@ -52,6 +54,7 @@
/**
* sysfb_disable() - disable the Generic System Framebuffers support
+ * @dev: if non-NULL, only disable when @dev is the system framebuffer's parent device
*
* This disables the registration of system framebuffer devices that match the
* generic drivers that make use of the system framebuffer set up by firmware.
@@ -61,17 +64,21 @@
* Context: The function can sleep. A @disable_lock mutex is acquired to serialize
* against sysfb_init(), that registers a system framebuffer device.
*/
-void sysfb_disable(void)
+void sysfb_disable(struct device *dev)
{
+ struct screen_info *si = &screen_info;
+
mutex_lock(&disable_lock);
- sysfb_unregister();
- disabled = true;
+ if (!dev || dev == sysfb_parent_dev(si)) {
+ sysfb_unregister();
+ disabled = true;
+ }
mutex_unlock(&disable_lock);
}
EXPORT_SYMBOL_GPL(sysfb_disable);
#if defined(CONFIG_PCI)
-static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
{
/*
* TODO: Try to integrate this code into the PCI subsystem
@@ -87,13 +94,13 @@
return true;
}
#else
-static __init bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
+static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
{
return false;
}
#endif
-static __init struct device *sysfb_parent_dev(const struct screen_info *si)
+static struct device *sysfb_parent_dev(const struct screen_info *si)
{
struct pci_dev *pdev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 7b561e8..4bd61c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1500,6 +1500,7 @@
struct gc_info_v1_0 v1;
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
+ struct gc_info_v1_3 v1_3;
struct gc_info_v2_0 v2;
struct gc_info_v2_1 v2_1;
};
@@ -1558,6 +1559,16 @@
adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
}
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
+ adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
+ adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
+ adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
+ }
break;
case 2:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ddda94e..56cc58e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -240,6 +240,12 @@
uint32_t gc_tcp_size_per_cu;
uint32_t gc_num_cu_per_sqc;
uint32_t gc_tcc_size;
+ uint32_t gc_tcp_cache_line_size;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_instruction_cache_line_size;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_line_size;
+ uint32_t gc_tcc_cache_line_size;
};
struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 2c611b8..e45d23e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3005,7 +3005,7 @@
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index b7a08e7..d163d92 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -187,6 +187,7 @@
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index a83bd03..5cb11cc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -28,6 +28,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include "amdgpu.h"
@@ -935,10 +936,14 @@
}
afb = to_amdgpu_framebuffer(new_state->fb);
- obj = new_state->fb->obj[0];
+ obj = drm_gem_fb_get_obj(new_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return -EINVAL;
+ }
+
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
-
r = amdgpu_bo_reserve(rbo, true);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 46bf19c..710e328 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -258,6 +258,48 @@
uint32_t gc_gl2c_per_gpu;
};
+struct gc_info_v1_3 {
+ struct gpu_info_header header;
+ uint32_t gc_num_se;
+ uint32_t gc_num_wgp0_per_sa;
+ uint32_t gc_num_wgp1_per_sa;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_gl2c;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_sa_per_se;
+ uint32_t gc_num_packer_per_sc;
+ uint32_t gc_num_gl2a;
+ uint32_t gc_num_tcp_per_sa;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_tcps;
+ uint32_t gc_num_tcp_per_wpg;
+ uint32_t gc_tcp_l1_size;
+ uint32_t gc_num_sqc_per_wgp;
+ uint32_t gc_l1_instruction_cache_size_per_sqc;
+ uint32_t gc_l1_data_cache_size_per_sqc;
+ uint32_t gc_gl1c_per_sa;
+ uint32_t gc_gl1c_size_per_instance;
+ uint32_t gc_gl2c_per_gpu;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_tcp_cache_line_size;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_instruction_cache_line_size;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_line_size;
+ uint32_t gc_tcc_size;
+ uint32_t gc_tcc_cache_line_size;
+};
+
struct gc_info_v2_0 {
struct gpu_info_header header;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 9d7454b..74e35f8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2224,8 +2224,9 @@
}
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
- enum amd_dpm_forced_level level,
- bool skip_display_settings)
+ enum amd_dpm_forced_level level,
+ bool skip_display_settings,
+ bool force_update)
{
int ret = 0;
int index = 0;
@@ -2254,7 +2255,7 @@
}
}
- if (smu_dpm_ctx->dpm_level != level) {
+ if (force_update || smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2265,13 +2266,12 @@
smu_dpm_ctx->dpm_level = level;
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
- if (smu->power_profile_mode != workload[0])
+ if (force_update || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
@@ -2292,11 +2292,13 @@
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
- ret = smu_adjust_power_state_dynamic(smu, level, false);
+ ret = smu_adjust_power_state_dynamic(smu, level, false, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
+ ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+ break;
case AMD_PP_TASK_READJUST_POWER_STATE:
- ret = smu_adjust_power_state_dynamic(smu, level, true);
+ ret = smu_adjust_power_state_dynamic(smu, level, true, false);
break;
default:
break;
@@ -2343,8 +2345,7 @@
workload[0] = smu->workload_setting[index];
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
index de2e442..87ca5ce 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
@@ -92,7 +92,6 @@
//Resets
#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
-#define PPSMC_MSG_Mode1Reset 0x2F
//Set SystemVirtual DramAddrHigh
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
@@ -119,11 +118,12 @@
//STB to dram log
#define PPSMC_MSG_DumpSTBtoDram 0x3D
-#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E
-#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F
+#define PPSMC_MSG_STBtoDramLogSetDramAddress 0x3E
+#define PPSMC_MSG_DummyUndefined 0x3F
#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40
#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41
+#define PPSMC_MSG_UseProfilingMode 0x42
#define PPSMC_MSG_AllowGfxDcs 0x43
#define PPSMC_MSG_DisallowGfxDcs 0x44
#define PPSMC_MSG_EnableAudioStutterWA 0x45
@@ -135,6 +135,16 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B
#define PPSMC_MSG_SetPriorityDeltaGain 0x4C
#define PPSMC_MSG_AllowIHHostInterrupt 0x4D
+#define PPSMC_MSG_EnableShadowDpm 0x4E
#define PPSMC_MSG_Mode3Reset 0x4F
-#define PPSMC_Message_Count 0x50
+#define PPSMC_MSG_SetDriverDramAddr 0x50
+#define PPSMC_MSG_SetToolsDramAddr 0x51
+#define PPSMC_MSG_TransferTableSmu2DramWithAddr 0x52
+#define PPSMC_MSG_TransferTableDram2SmuWithAddr 0x53
+#define PPSMC_MSG_GetAllRunningSmuFeatures 0x54
+#define PPSMC_MSG_GetSvi3Voltage 0x55
+#define PPSMC_MSG_UpdatePolicy 0x56
+#define PPSMC_MSG_ExtPwrConnSupport 0x57
+#define PPSMC_MSG_PreloadSwPstateForUclkOverDrive 0x58
+#define PPSMC_Message_Count 0x59
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 78c3f94..9974c9f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -121,6 +121,7 @@
#define P2S_TABLE_ID_A 0x50325341
#define P2S_TABLE_ID_X 0x50325358
+#define P2S_TABLE_ID_3 0x50325303
// clang-format off
static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
@@ -271,14 +272,18 @@
struct amdgpu_device *adev = smu->adev;
uint32_t p2s_table_id = P2S_TABLE_ID_A;
int ret = 0, i, p2stable_count;
+ int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
/* No need to load P2S tables in IOV mode */
if (amdgpu_sriov_vf(adev))
return 0;
- if (!(adev->flags & AMD_IS_APU))
+ if (!(adev->flags & AMD_IS_APU)) {
p2s_table_id = P2S_TABLE_ID_X;
+ if (var == 0x5)
+ p2s_table_id = P2S_TABLE_ID_3;
+ }
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index a7d0231..7bc95c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2378,7 +2378,7 @@
size += sysfs_emit_at(buf, size, " ");
for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
- size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
+ size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i],
(i == smu->power_profile_mode) ? "* " : " ");
size += sysfs_emit_at(buf, size, "\n");
@@ -2408,7 +2408,7 @@
do { \
size += sysfs_emit_at(buf, size, "%-30s", #field); \
for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \
- size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
+ size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
size += sysfs_emit_at(buf, size, "\n"); \
} while (0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index e1a2790..0c09b8c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -115,7 +115,6 @@
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
- MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
@@ -1824,50 +1823,6 @@
smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
}
-static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu,
- uint32_t size)
-{
- int ret = 0;
-
- /* message SMU to update the bad page number on SMUBUS */
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetNumBadMemoryPagesRetired,
- size, NULL);
- if (ret)
- dev_err(smu->adev->dev,
- "[%s] failed to message SMU to update bad memory pages number\n",
- __func__);
-
- return ret;
-}
-
-static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu,
- uint32_t size)
-{
- int ret = 0;
-
- /* message SMU to update the bad channel info on SMUBUS */
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
- size, NULL);
- if (ret)
- dev_err(smu->adev->dev,
- "[%s] failed to message SMU to update bad memory pages channel info\n",
- __func__);
-
- return ret;
-}
-
-static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
- void *table)
-{
- int ret = 0;
-
- // TODO
-
- return ret;
-}
-
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -2015,12 +1970,9 @@
.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
.set_mp1_state = smu_v14_0_2_set_mp1_state,
.set_df_cstate = smu_v14_0_2_set_df_cstate,
- .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num,
- .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag,
#if 0
.gpo_control = smu_v14_0_gpo_control,
#endif
- .get_ecc_info = smu_v14_0_2_get_ecc_info,
};
void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 59f11af..dc75a92 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5935,6 +5935,18 @@
else
status = connector_status_disconnected;
+ if (status != connector_status_disconnected &&
+ !intel_dp_mst_verify_dpcd_state(intel_dp))
+ /*
+ * This requires retrying detection for instance to re-enable
+ * the MST mode that got reset via a long HPD pulse. The retry
+ * will happen either via the hotplug handler's retry logic,
+ * ensured by setting the connector here to SST/disconnected,
+ * or via a userspace connector probing in response to the
+ * hotplug uevent sent when removing the MST connectors.
+ */
+ status = connector_status_disconnected;
+
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 27ce5c3..17978a1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -1998,3 +1998,43 @@
return false;
}
+
+/**
+ * intel_dp_mst_verify_dpcd_state - verify the MST SW enabled state wrt. the DPCD
+ * @intel_dp: DP port object
+ *
+ * Verify if @intel_dp's MST enabled SW state matches the corresponding DPCD
+ * state. A long HPD pulse - not long enough to be detected as a disconnected
+ * state - could've reset the DPCD state, which requires tearing
+ * down/recreating the MST topology.
+ *
+ * Returns %true if the SW MST enabled and DPCD states match, %false
+ * otherwise.
+ */
+bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ int ret;
+ u8 val;
+
+ if (!intel_dp->is_mst)
+ return true;
+
+ ret = drm_dp_dpcd_readb(intel_dp->mst_mgr.aux, DP_MSTM_CTRL, &val);
+
+ /* Adjust the expected register value for SST + SideBand. */
+ if (ret < 0 || val != (DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC)) {
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] MST mode got reset, removing topology (ret=%d, ctrl=0x%02x)\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ ret, val);
+
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 8ca1d59..9e4c767 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -27,5 +27,6 @@
struct intel_link_bw_limits *limits);
bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index eae5b5e..931d2cf 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1870,7 +1870,6 @@
/* Lenovo Yoga Tab 3 Pro YT3-X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)vlv_dsi_lenovo_yoga_tab3_backlight_fixup,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 3b69bc6..551b0d7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -212,6 +212,37 @@
}
}
+ if (IS_ARROWLAKE(gt->i915)) {
+ bool too_old = false;
+
+ /*
+ * ARL requires a newer firmware than MTL did (102.0.10.1878) but the
+ * firmware is actually common. So we need to do an explicit version check
+ * here rather than using a separate table entry. And if the older
+ * MTL-only version is found, then just don't use the GSC rather than
+ * aborting the driver load.
+ */
+ if (gsc->release.major < 102) {
+ too_old = true;
+ } else if (gsc->release.major == 102) {
+ if (gsc->release.minor == 0) {
+ if (gsc->release.patch < 10) {
+ too_old = true;
+ } else if (gsc->release.patch == 10) {
+ if (gsc->release.build < 1878)
+ too_old = true;
+ }
+ }
+ }
+
+ if (too_old) {
+ gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
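The nested checks above implement a lexicographic "older than 102.0.10.1878" test on (major, minor, patch, build). A standalone sketch of the same comparison, assuming nothing about i915 internals:

#include <stdbool.h>
#include <stdio.h>

struct fw_version { unsigned int major, minor, patch, build; };

/* Returns true when 'v' sorts strictly before 'min' component by component. */
static bool fw_older_than(const struct fw_version *v, const struct fw_version *min)
{
	if (v->major != min->major)
		return v->major < min->major;
	if (v->minor != min->minor)
		return v->minor < min->minor;
	if (v->patch != min->patch)
		return v->patch < min->patch;
	return v->build < min->build;
}

int main(void)
{
	const struct fw_version min_arl = { 102, 0, 10, 1878 };
	const struct fw_version got = { 102, 0, 10, 1877 };

	printf("too old: %d\n", fw_older_than(&got, &min_arl)); /* prints 1 */
	return 0;
}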
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index d80278e..ec33ad9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -698,12 +698,18 @@
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
+ int ret;
+
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_HUC:
- intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ ret = intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ if (ret)
+ return ret;
break;
case INTEL_UC_FW_TYPE_GSC:
- intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ ret = intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ if (ret)
+ return ret;
break;
default:
MISSING_CASE(uc_fw->type);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d7723dd..110340e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -546,6 +546,8 @@
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
+#define IS_ARROWLAKE(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d26de37..eede541 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -203,6 +203,10 @@
INTEL_DG2_G12_IDS(ID),
};
+static const u16 subplatform_arl_ids[] = {
+ INTEL_ARL_IDS(ID),
+};
+
static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
for (; num; num--, p++) {
@@ -260,6 +264,9 @@
} else if (find_devid(devid, subplatform_g12_ids,
ARRAY_SIZE(subplatform_g12_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G12);
+ } else if (find_devid(devid, subplatform_arl_ids,
+ ARRAY_SIZE(subplatform_arl_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d1a2abc..df73ef9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -127,6 +127,9 @@
#define INTEL_SUBPLATFORM_N 1
#define INTEL_SUBPLATFORM_RPLU 2
+/* MTL */
+#define INTEL_SUBPLATFORM_ARL 0
+
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index b868281..ad1e623 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -134,6 +134,8 @@
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
+ preempt_disable();
+
write_seqcount_begin(&local_stats->lock);
local_stats->start_ns = now;
write_seqcount_end(&local_stats->lock);
@@ -141,6 +143,8 @@
write_seqcount_begin(&global_stats->lock);
global_stats->start_ns = now;
write_seqcount_end(&global_stats->lock);
+
+ preempt_enable();
}
static void
@@ -162,8 +166,10 @@
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
+ preempt_disable();
v3d_stats_update(local_stats, now);
v3d_stats_update(global_stats, now);
+ preempt_enable();
}
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
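The added preempt_disable()/preempt_enable() pairs keep the seqcount write sections from being preempted: a writer parked mid-update on an odd sequence value would leave readers spinning. A minimal sketch of the writer-side pattern (illustrative only, not v3d code):

#include <linux/preempt.h>
#include <linux/seqlock.h>

struct example_stats {
	seqcount_t lock;
	u64 start_ns;
};

static void example_stats_start(struct example_stats *stats, u64 now)
{
	preempt_disable();			/* keep the write section atomic on this CPU */
	write_seqcount_begin(&stats->lock);
	stats->start_ns = now;
	write_seqcount_end(&stats->lock);
	preempt_enable();
}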
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 717d624..890a66a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,8 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+
+#include "vmwgfx_bo.h"
#include <linux/highmem.h>
/*
@@ -420,13 +422,105 @@
return 0;
}
+static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+ struct vmw_private *vmw =
+ container_of(bo->tbo.bdev, struct vmw_private, bdev);
+ void *ptr = NULL;
+ int ret;
+
+ if (bo->tbo.base.import_attach) {
+ ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
+ if (ret) {
+ drm_dbg_driver(&vmw->drm,
+ "Wasn't able to map external bo!\n");
+ goto out;
+ }
+ ptr = map->vaddr;
+ } else {
+ ptr = vmw_bo_map_and_cache(bo);
+ }
+
+out:
+ return ptr;
+}
+
+static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+ if (bo->tbo.base.import_attach)
+ dma_buf_vunmap(bo->tbo.base.dma_buf, map);
+ else
+ vmw_bo_unmap(bo);
+}
+
+static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
+ u32 dst_stride, struct vmw_bo *src,
+ u32 src_offset, u32 src_stride,
+ u32 width_in_bytes, u32 height,
+ struct vmw_diff_cpy *diff)
+{
+ struct vmw_private *vmw =
+ container_of(dst->tbo.bdev, struct vmw_private, bdev);
+ size_t dst_size = dst->tbo.resource->size;
+ size_t src_size = src->tbo.resource->size;
+ struct iosys_map dst_map = {0};
+ struct iosys_map src_map = {0};
+ int ret, i;
+ int x_in_bytes;
+ u8 *vsrc;
+ u8 *vdst;
+
+ vsrc = map_external(src, &src_map);
+ if (!vsrc) {
+ drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vdst = map_external(dst, &dst_map);
+ if (!vdst) {
+ drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsrc += src_offset;
+ vdst += dst_offset;
+ if (src_stride == dst_stride) {
+ dst_size -= dst_offset;
+ src_size -= src_offset;
+ memcpy(vdst, vsrc,
+ min(dst_stride * height, min(dst_size, src_size)));
+ } else {
+ WARN_ON(dst_stride < width_in_bytes);
+ for (i = 0; i < height; ++i) {
+ memcpy(vdst, vsrc, width_in_bytes);
+ vsrc += src_stride;
+ vdst += dst_stride;
+ }
+ }
+
+ x_in_bytes = (dst_offset % dst_stride);
+ diff->rect.x1 = x_in_bytes / diff->cpp;
+ diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
+ diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
+ diff->rect.y2 = diff->rect.y1 + height;
+
+ ret = 0;
+out:
+ unmap_external(src, &src_map);
+ unmap_external(dst, &dst_map);
+
+ return ret;
+}
+
/**
* vmw_bo_cpu_blit - in-kernel cpu blit.
*
- * @dst: Destination buffer object.
+ * @vmw_dst: Destination buffer object.
* @dst_offset: Destination offset of blit start in bytes.
* @dst_stride: Destination stride in bytes.
- * @src: Source buffer object.
+ * @vmw_src: Source buffer object.
* @src_offset: Source offset of blit start in bytes.
* @src_stride: Source stride in bytes.
* @w: Width of blit.
@@ -444,13 +538,15 @@
* Neither of the buffer objects may be placed in PCI memory
* (Fixed memory in TTM terminology) when using this function.
*/
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
u32 dst_offset, u32 dst_stride,
- struct ttm_buffer_object *src,
+ struct vmw_bo *vmw_src,
u32 src_offset, u32 src_stride,
u32 w, u32 h,
struct vmw_diff_cpy *diff)
{
+ struct ttm_buffer_object *src = &vmw_src->tbo;
+ struct ttm_buffer_object *dst = &vmw_dst->tbo;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
@@ -460,6 +556,11 @@
int ret = 0;
struct page **dst_pages = NULL;
struct page **src_pages = NULL;
+ bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+ bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (WARN_ON(dst == src))
+ return -EINVAL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -479,6 +580,11 @@
return ret;
}
+ if (src_external || dst_external)
+ return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
+ vmw_src, src_offset, src_stride,
+ w, h, diff);
+
if (!src->ttm->pages && src->ttm->sg) {
src_pages = kvmalloc_array(src->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index f42ebc4..a0e433f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -360,6 +360,8 @@
void *virtual;
int ret;
+ atomic_inc(&vbo->map_count);
+
virtual = ttm_kmap_obj_virtual(&vbo->map, ¬_used);
if (virtual)
return virtual;
@@ -383,11 +385,17 @@
*/
void vmw_bo_unmap(struct vmw_bo *vbo)
{
+ int map_count;
+
if (vbo->map.bo == NULL)
return;
- ttm_bo_kunmap(&vbo->map);
- vbo->map.bo = NULL;
+ map_count = atomic_dec_return(&vbo->map_count);
+
+ if (!map_count) {
+ ttm_bo_kunmap(&vbo->map);
+ vbo->map.bo = NULL;
+ }
}
@@ -421,6 +429,7 @@
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
xa_init(&vmw_bo->detached_resources);
+ atomic_set(&vmw_bo->map_count, 0);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
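The new map_count makes vmw_bo_map_and_cache()/vmw_bo_unmap() a balanced pair, so one user's unmap no longer tears down a kmap another user still relies on. A hypothetical nesting that the counter makes safe (illustrative only, not a literal vmwgfx call sequence):

#include "vmwgfx_bo.h"	/* driver-internal header providing struct vmw_bo */

static void example_nested_map(struct vmw_bo *vbo)
{
	void *a = vmw_bo_map_and_cache(vbo);	/* map_count 0 -> 1, creates the kmap */
	void *b = vmw_bo_map_and_cache(vbo);	/* map_count 1 -> 2, reuses the cached kmap */

	(void)a;
	(void)b;	/* both pointers remain valid here */

	vmw_bo_unmap(vbo);	/* map_count 2 -> 1, mapping stays valid for the other user */
	vmw_bo_unmap(vbo);	/* map_count 1 -> 0, ttm_bo_kunmap() actually runs */
}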
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 62b4342..43b5439 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -71,6 +71,8 @@
* @map: Kmap object for semi-persistent mappings
* @res_tree: RB tree of resources using this buffer object as a backing MOB
* @res_prios: Eviction priority counts for attached resources
+ * @map_count: The number of currently active maps. Will differ from the
+ * cpu_writers because it includes kernel maps.
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -90,6 +92,7 @@
u32 res_prios[TTM_MAX_BO_PRIORITY];
struct xarray detached_resources;
+ atomic_t map_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 32f50e5..3f4719b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1353,9 +1353,9 @@
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *dst,
u32 dst_offset, u32 dst_stride,
- struct ttm_buffer_object *src,
+ struct vmw_bo *src,
u32 src_offset, u32 src_stride,
u32 w, u32 h,
struct vmw_diff_cpy *diff);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5453f7c..fab155a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -502,7 +502,7 @@
container_of(dirty->unit, typeof(*stdu), base);
s32 width, height;
s32 src_pitch, dst_pitch;
- struct ttm_buffer_object *src_bo, *dst_bo;
+ struct vmw_bo *src_bo, *dst_bo;
u32 src_offset, dst_offset;
struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
@@ -517,11 +517,11 @@
/* Assume we are blitting from Guest (bo) to Host (display_srf) */
src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
- src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ src_bo = stdu->display_srf->res.guest_memory_bo;
src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
- dst_bo = &ddirty->buf->tbo;
+ dst_bo = ddirty->buf;
dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
@@ -1170,7 +1170,7 @@
struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
struct vmw_stdu_update_gb_image *cmd_img = cmd;
struct vmw_stdu_update *cmd_update;
- struct ttm_buffer_object *src_bo, *dst_bo;
+ struct vmw_bo *src_bo, *dst_bo;
u32 src_offset, dst_offset;
s32 src_pitch, dst_pitch;
s32 width, height;
@@ -1184,11 +1184,11 @@
diff.cpp = stdu->cpp;
- dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ dst_bo = stdu->display_srf->res.guest_memory_bo;
dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
- src_bo = &vfbbo->buffer->tbo;
+ src_bo = vfbbo->buffer;
src_pitch = update->vfb->base.pitches[0];
src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
stdu->cpp;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 8ae6a76..1625b30 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -2283,9 +2283,11 @@
/*
* Without mob support we're just going to use raw memory buffer
* because we wouldn't be able to support full surface coherency
- * without mobs
+ * without mobs. There is also no reason to support surface coherency
+ * without 3d (i.e. gpu usage on the host) because then all the
+ * content is going to be rendered guest side.
*/
- if (!dev_priv->has_mob) {
+ if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
int cpp = DIV_ROUND_UP(args->bpp, 8);
switch (cpp) {
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 832ea81..1faeca7 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -450,7 +450,7 @@
{
return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
- uval);
+ (uval & POWER_SETUP_I1_DATA_MASK));
}
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c7561a5..50e8fc4 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3341,9 +3341,10 @@
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
struct xe_tile *tile;
- struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE];
- u32 tile_needs_invalidate = 0;
+ struct xe_gt_tlb_invalidation_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
u8 id;
+ u32 fence_id = 0;
int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
@@ -3371,27 +3372,37 @@
if (xe_pt_zap_ptes(tile, vma)) {
xe_device_wmb(xe);
xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[id], true);
+ &fence[fence_id],
+ true);
- /*
- * FIXME: We potentially need to invalidate multiple
- * GTs within the tile
- */
ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[id], vma);
+ &fence[fence_id], vma);
if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence[id]);
+ xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
goto wait;
}
+ ++fence_id;
- tile_needs_invalidate |= BIT(id);
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id],
+ true);
+
+ ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
+ &fence[fence_id], vma);
+ if (ret < 0) {
+ xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+ goto wait;
+ }
+ ++fence_id;
}
}
wait:
- for_each_tile(tile, xe, id)
- if (tile_needs_invalidate & BIT(id))
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
vma->tile_invalidated = vma->tile_mask;
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 6bb8d7b..ee396f2 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -420,7 +420,7 @@
static const struct ec_board_info board_info_strix_x570_e_gaming = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR |
SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
SENSOR_IN_CPU_CORE,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
diff --git a/drivers/hwmon/pt5161l.c b/drivers/hwmon/pt5161l.c
index b0d58a2..a9f0b23 100644
--- a/drivers/hwmon/pt5161l.c
+++ b/drivers/hwmon/pt5161l.c
@@ -427,7 +427,7 @@
struct pt5161l_data *data = dev_get_drvdata(dev);
int ret;
u8 buf[8];
- long adc_code;
+ u32 adc_code;
switch (attr) {
case hwmon_temp_input:
@@ -449,7 +449,7 @@
adc_code = buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0];
if (adc_code == 0 || adc_code >= 0x3ff) {
- dev_dbg(dev, "Invalid adc_code %lx\n", adc_code);
+ dev_dbg(dev, "Invalid adc_code %x\n", adc_code);
return -EIO;
}
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index b3c34eb..9446657 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -91,48 +91,6 @@
return rc;
}
-static int cypress_ps2_read_cmd_status(struct psmouse *psmouse,
- u8 cmd, u8 *param)
-{
- struct ps2dev *ps2dev = &psmouse->ps2dev;
- enum psmouse_state old_state;
- int pktsize;
- int rc;
-
- ps2_begin_command(ps2dev);
-
- old_state = psmouse->state;
- psmouse->state = PSMOUSE_CMD_MODE;
- psmouse->pktcnt = 0;
-
- pktsize = (cmd == CYTP_CMD_READ_TP_METRICS) ? 8 : 3;
- memset(param, 0, pktsize);
-
- rc = cypress_ps2_sendbyte(psmouse, PSMOUSE_CMD_GETINFO & 0xff);
- if (rc)
- goto out;
-
- if (!wait_event_timeout(ps2dev->wait,
- psmouse->pktcnt >= pktsize,
- msecs_to_jiffies(CYTP_CMD_TIMEOUT))) {
- rc = -ETIMEDOUT;
- goto out;
- }
-
- memcpy(param, psmouse->packet, pktsize);
-
- psmouse_dbg(psmouse, "Command 0x%02x response data (0x): %*ph\n",
- cmd, pktsize, param);
-
-out:
- psmouse->state = old_state;
- psmouse->pktcnt = 0;
-
- ps2_end_command(ps2dev);
-
- return rc;
-}
-
static bool cypress_verify_cmd_state(struct psmouse *psmouse, u8 cmd, u8* param)
{
bool rate_match = false;
@@ -166,6 +124,8 @@
static int cypress_send_ext_cmd(struct psmouse *psmouse, u8 cmd, u8 *param)
{
u8 cmd_prefix = PSMOUSE_CMD_SETRES & 0xff;
+ unsigned int resp_size = cmd == CYTP_CMD_READ_TP_METRICS ? 8 : 3;
+ unsigned int ps2_cmd = (PSMOUSE_CMD_GETINFO & 0xff) | (resp_size << 8);
int tries = CYTP_PS2_CMD_TRIES;
int error;
@@ -179,10 +139,18 @@
cypress_ps2_ext_cmd(psmouse, cmd_prefix, DECODE_CMD_BB(cmd));
cypress_ps2_ext_cmd(psmouse, cmd_prefix, DECODE_CMD_AA(cmd));
- error = cypress_ps2_read_cmd_status(psmouse, cmd, param);
- if (!error && cypress_verify_cmd_state(psmouse, cmd, param))
- return 0;
+ error = ps2_command(&psmouse->ps2dev, param, ps2_cmd);
+ if (error) {
+ psmouse_dbg(psmouse, "Command 0x%02x failed: %d\n",
+ cmd, error);
+ } else {
+ psmouse_dbg(psmouse,
+ "Command 0x%02x response data (0x): %*ph\n",
+ cmd, resp_size, param);
+ if (cypress_verify_cmd_state(psmouse, cmd, param))
+ return 0;
+ }
} while (--tries > 0);
return -EIO;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index a31460f..ed2b106 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1777,7 +1777,7 @@
goto out_unlock;
}
- iommu_report_device_fault(master->dev, &fault_evt);
+ ret = iommu_report_device_fault(master->dev, &fault_evt);
out_unlock:
mutex_unlock(&smmu->streams_mutex);
return ret;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9ff8b83..4aa070c 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1944,6 +1944,7 @@
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
+ u16 did;
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1952,10 +1953,11 @@
return;
}
+ did = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- intel_context_flush_present(info, context, true);
+ intel_context_flush_present(info, context, did, true);
}
static int domain_setup_first_level(struct intel_iommu *iommu,
@@ -4249,6 +4251,7 @@
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
struct context_entry *context;
+ u16 did;
spin_lock(&iommu->lock);
if (context_copied(iommu, bus, devfn)) {
@@ -4261,6 +4264,7 @@
spin_unlock(&iommu->lock);
return -ENODEV;
}
+ did = context_domain_id(context);
if (enable)
context_set_sm_pre(context);
@@ -4269,7 +4273,7 @@
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(context, sizeof(*context));
- intel_context_flush_present(info, context, true);
+ intel_context_flush_present(info, context, did, true);
spin_unlock(&iommu->lock);
return 0;
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index b67c14d..a969be2 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1154,7 +1154,7 @@
void intel_context_flush_present(struct device_domain_info *info,
struct context_entry *context,
- bool affect_domains);
+ u16 did, bool affect_domains);
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 5792c81..b51fc26 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -683,6 +683,7 @@
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
+ u16 did;
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, false);
@@ -691,10 +692,11 @@
return;
}
+ did = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- intel_context_flush_present(info, context, false);
+ intel_context_flush_present(info, context, did, false);
}
static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -885,10 +887,9 @@
*/
void intel_context_flush_present(struct device_domain_info *info,
struct context_entry *context,
- bool flush_domains)
+ u16 did, bool flush_domains)
{
struct intel_iommu *iommu = info->iommu;
- u16 did = context_domain_id(context);
struct pasid_entry *pte;
int i;
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 81e9cc6..4674e61 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -115,6 +115,59 @@
return group;
}
+static struct iommu_attach_handle *find_fault_handler(struct device *dev,
+ struct iopf_fault *evt)
+{
+ struct iommu_fault *fault = &evt->fault;
+ struct iommu_attach_handle *attach_handle;
+
+ if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+ attach_handle = iommu_attach_handle_get(dev->iommu_group,
+ fault->prm.pasid, 0);
+ if (IS_ERR(attach_handle)) {
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+ if (!ops->user_pasid_table)
+ return NULL;
+ /*
+ * The iommu driver for this device supports user-
+ * managed PASID table. Therefore page faults for
+ * any PASID should go through the NESTING domain
+ * attached to the device RID.
+ */
+ attach_handle = iommu_attach_handle_get(
+ dev->iommu_group, IOMMU_NO_PASID,
+ IOMMU_DOMAIN_NESTED);
+ if (IS_ERR(attach_handle))
+ return NULL;
+ }
+ } else {
+ attach_handle = iommu_attach_handle_get(dev->iommu_group,
+ IOMMU_NO_PASID, 0);
+
+ if (IS_ERR(attach_handle))
+ return NULL;
+ }
+
+ if (!attach_handle->domain->iopf_handler)
+ return NULL;
+
+ return attach_handle;
+}
+
+static void iopf_error_response(struct device *dev, struct iopf_fault *evt)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_fault *fault = &evt->fault;
+ struct iommu_page_response resp = {
+ .pasid = fault->prm.pasid,
+ .grpid = fault->prm.grpid,
+ .code = IOMMU_PAGE_RESP_INVALID
+ };
+
+ ops->page_response(dev, evt, &resp);
+}
+
/**
* iommu_report_device_fault() - Report fault event to device driver
* @dev: the device
@@ -153,24 +206,39 @@
* handling framework should guarantee that the iommu domain could only be
* freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults
- * have been flushed.
+ * have been flushed. In case no page fault handler is attached or no iopf
+ * params are set up, ops->page_response() is called to complete the evt.
+ *
+ * Returns 0 on success, or an error in case of a bad/failed iopf setup.
*/
-void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
+ struct iommu_attach_handle *attach_handle;
struct iommu_fault *fault = &evt->fault;
struct iommu_fault_param *iopf_param;
struct iopf_group abort_group = {};
struct iopf_group *group;
+ attach_handle = find_fault_handler(dev, evt);
+ if (!attach_handle)
+ goto err_bad_iopf;
+
+ /*
+ * Something has gone wrong if a fault-capable domain is attached but no
+ * iopf_param is set up.
+ */
iopf_param = iopf_get_dev_fault_param(dev);
if (WARN_ON(!iopf_param))
- return;
+ goto err_bad_iopf;
if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- report_partial_fault(iopf_param, fault);
+ int ret;
+
+ ret = report_partial_fault(iopf_param, fault);
iopf_put_dev_fault_param(iopf_param);
/* A request that is not the last does not need to be ack'd */
- return;
+
+ return ret;
}
/*
@@ -185,38 +253,7 @@
if (group == &abort_group)
goto err_abort;
- if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
- group->attach_handle = iommu_attach_handle_get(dev->iommu_group,
- fault->prm.pasid,
- 0);
- if (IS_ERR(group->attach_handle)) {
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (!ops->user_pasid_table)
- goto err_abort;
-
- /*
- * The iommu driver for this device supports user-
- * managed PASID table. Therefore page faults for
- * any PASID should go through the NESTING domain
- * attached to the device RID.
- */
- group->attach_handle =
- iommu_attach_handle_get(dev->iommu_group,
- IOMMU_NO_PASID,
- IOMMU_DOMAIN_NESTED);
- if (IS_ERR(group->attach_handle))
- goto err_abort;
- }
- } else {
- group->attach_handle =
- iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
- if (IS_ERR(group->attach_handle))
- goto err_abort;
- }
-
- if (!group->attach_handle->domain->iopf_handler)
- goto err_abort;
+ group->attach_handle = attach_handle;
/*
* On success iopf_handler must call iopf_group_response() and
@@ -225,7 +262,7 @@
if (group->attach_handle->domain->iopf_handler(group))
goto err_abort;
- return;
+ return 0;
err_abort:
dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
@@ -235,6 +272,14 @@
__iopf_free_group(group);
else
iopf_free_group(group);
+
+ return 0;
+
+err_bad_iopf:
+ if (fault->type == IOMMU_FAULT_PAGE_REQ)
+ iopf_error_response(dev, evt);
+
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 75f244a..06ffc68 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -552,9 +552,8 @@
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
- /* If no access, then nothing to do */
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
+ return -EINVAL;
while (pgcount--) {
ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f5d9fd1..ff4149a 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -515,9 +515,8 @@
if (WARN_ON(iaext || paddr >> cfg->oas))
return -ERANGE;
- /* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
+ return -EINVAL;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index ad28031..c004640 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -245,9 +245,8 @@
if (WARN_ON(paddr >> cfg->oas))
return -ERANGE;
- /* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
- return 0;
+ return -EINVAL;
tbl = dart_get_table(data, iova);
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 7422482..157a89b 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -213,6 +213,10 @@
if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
return -EOVERFLOW;
+ if (!(cmd->flags &
+ (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
+ return -EINVAL;
+
ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
@@ -253,6 +257,10 @@
cmd->dst_iova >= ULONG_MAX)
return -EOVERFLOW;
+ if (!(cmd->flags &
+ (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
+ return -EINVAL;
+
src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
if (IS_ERR(src_ioas))
return PTR_ERR(src_ioas);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f74bacf..bb9c3d6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -427,6 +427,8 @@
struct netlink_ext_ack *extack)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
@@ -438,74 +440,80 @@
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
- rcu_read_unlock();
- return -ENODEV;
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+ if (!real_dev) {
+ err = -ENODEV;
+ goto out;
}
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
- rcu_read_unlock();
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec) {
- rcu_read_unlock();
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
- xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+ xs->xso.real_dev = real_dev;
+ err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
list_add(&ipsec->list, &bond->ipsec_list);
- spin_unlock_bh(&bond->ipsec_lock);
+ mutex_unlock(&bond->ipsec_lock);
} else {
kfree(ipsec);
}
- rcu_read_unlock();
+out:
+ netdev_put(real_dev, &tracker);
return err;
}
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
struct slave *slave;
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave)
- goto out;
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
+ return;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
- netif_is_bond_master(slave->dev)) {
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(real_dev)) {
if (!list_empty(&bond->ipsec_list))
- slave_warn(bond_dev, slave->dev,
+ slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_add\n",
__func__);
- spin_unlock_bh(&bond->ipsec_lock);
goto out;
}
- spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
- ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
- slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+ /* If a new state was added before ipsec_lock was acquired */
+ if (ipsec->xs->xso.real_dev == real_dev)
+ continue;
+
+ ipsec->xs->xso.real_dev = real_dev;
+ if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+ slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
}
- spin_unlock_bh(&bond->ipsec_lock);
out:
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
/**
@@ -515,6 +523,8 @@
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
@@ -525,6 +535,9 @@
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+ rcu_read_unlock();
if (!slave)
goto out;
@@ -532,18 +545,19 @@
if (!xs->xso.real_dev)
goto out;
- WARN_ON(xs->xso.real_dev != slave->dev);
+ WARN_ON(xs->xso.real_dev != real_dev);
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
goto out;
}
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
- spin_lock_bh(&bond->ipsec_lock);
+ netdev_put(real_dev, &tracker);
+ mutex_lock(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (ipsec->xs == xs) {
list_del(&ipsec->list);
@@ -551,40 +565,72 @@
break;
}
}
- spin_unlock_bh(&bond->ipsec_lock);
- rcu_read_unlock();
+ mutex_unlock(&bond->ipsec_lock);
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
+ struct net_device *real_dev;
struct bond_ipsec *ipsec;
struct slave *slave;
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- if (!slave) {
- rcu_read_unlock();
+ slave = rtnl_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ if (!real_dev)
return;
- }
- spin_lock_bh(&bond->ipsec_lock);
+ mutex_lock(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (!ipsec->xs->xso.real_dev)
continue;
- if (!slave->dev->xfrmdev_ops ||
- !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
- netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev,
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(real_dev)) {
+ slave_warn(bond_dev, real_dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
} else {
- slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ if (real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
}
}
- spin_unlock_bh(&bond->ipsec_lock);
+ mutex_unlock(&bond->ipsec_lock);
+}
+
+static void bond_ipsec_free_sa(struct xfrm_state *xs)
+{
+ struct net_device *bond_dev = xs->xso.dev;
+ struct net_device *real_dev;
+ netdevice_tracker tracker;
+ struct bonding *bond;
+ struct slave *slave;
+
+ if (!bond_dev)
+ return;
+
+ rcu_read_lock();
+ bond = netdev_priv(bond_dev);
+ slave = rcu_dereference(bond->curr_active_slave);
+ real_dev = slave ? slave->dev : NULL;
+ netdev_hold(real_dev, &tracker, GFP_ATOMIC);
rcu_read_unlock();
+
+ if (!slave)
+ goto out;
+
+ if (!xs->xso.real_dev)
+ goto out;
+
+ WARN_ON(xs->xso.real_dev != real_dev);
+
+ if (real_dev && real_dev->xfrmdev_ops &&
+ real_dev->xfrmdev_ops->xdo_dev_state_free)
+ real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
+out:
+ netdev_put(real_dev, &tracker);
}
/**
@@ -627,6 +673,7 @@
static const struct xfrmdev_ops bond_xfrmdev_ops = {
.xdo_dev_state_add = bond_ipsec_add_sa,
.xdo_dev_state_delete = bond_ipsec_del_sa,
+ .xdo_dev_state_free = bond_ipsec_free_sa,
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -5877,7 +5924,7 @@
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
INIT_LIST_HEAD(&bond->ipsec_list);
- spin_lock_init(&bond->ipsec_lock);
+ mutex_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
@@ -5926,6 +5973,10 @@
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
+#ifdef CONFIG_XFRM_OFFLOAD
+ mutex_destroy(&bond->ipsec_lock);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
bond_set_slave_arr(bond, NULL, NULL);
list_del_rcu(&bond->bond_list);
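The bonding changes replace "use the slave under rcu_read_lock()" with "resolve under RCU, pin with netdev_hold(), then work in sleepable context", which is what allows the GFP_KERNEL allocation and the new ipsec_lock mutex. A sketch of that pattern under those assumptions (illustrative only; error handling trimmed):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/bonding.h>

/*
 * Resolve the active slave under RCU and take a tracked reference on it.
 * netdev_hold()/netdev_put() accept a NULL device, which keeps the
 * no-active-slave case simple for the caller.
 */
static struct net_device *example_pin_active_slave(struct bonding *bond,
						   netdevice_tracker *tracker)
{
	struct net_device *real_dev;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	real_dev = slave ? slave->dev : NULL;
	netdev_hold(real_dev, tracker, GFP_ATOMIC);	/* reference survives RCU exit */
	rcu_read_unlock();

	return real_dev;	/* caller may now sleep; release with netdev_put() */
}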
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index fddfd1d..4c546c3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -572,7 +572,7 @@
(*processed)++;
return true;
- drop:
+drop:
/* Clean rxdes0 (which resets own bit) */
rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
@@ -656,6 +656,11 @@
ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
return true;
@@ -809,6 +814,11 @@
dma_wmb();
first->txdes0 = cpu_to_le32(f_ctl_stat);
+ /* Ensure the descriptor config is visible before setting the tx
+ * pointer.
+ */
+ smp_wmb();
+
/* Update next TX pointer */
priv->tx_pointer = pointer;
@@ -829,7 +839,7 @@
return NETDEV_TX_OK;
- dma_err:
+dma_err:
if (net_ratelimit())
netdev_err(netdev, "map tx fragment failed\n");
@@ -851,7 +861,7 @@
* last fragment, so we know ftgmac100_free_tx_packet()
* hasn't freed the skb yet.
*/
- drop:
+drop:
/* Drop the packet */
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
@@ -1344,7 +1354,7 @@
ftgmac100_init_all(priv, true);
netdev_dbg(netdev, "Reset done !\n");
- bail:
+bail:
if (priv->mii_bus)
mutex_unlock(&priv->mii_bus->mdio_lock);
if (netdev->phydev)
@@ -1543,15 +1553,15 @@
return 0;
- err_ncsi:
+err_ncsi:
napi_disable(&priv->napi);
netif_stop_queue(netdev);
- err_alloc:
+err_alloc:
ftgmac100_free_buffers(priv);
free_irq(netdev->irq, netdev);
- err_irq:
+err_irq:
netif_napi_del(&priv->napi);
- err_hw:
+err_hw:
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
ftgmac100_free_rings(priv);
return err;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index cafded2..a00f915 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -52,32 +52,6 @@
return 0;
}
-static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
- const struct gdma_resp_hdr *resp_msg)
-{
- struct hwc_caller_ctx *ctx;
- int err;
-
- if (!test_bit(resp_msg->response.hwc_msg_id,
- hwc->inflight_msg_res.map)) {
- dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
- resp_msg->response.hwc_msg_id);
- return;
- }
-
- ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
- err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
- if (err)
- goto out;
-
- ctx->status_code = resp_msg->status;
-
- memcpy(ctx->output_buf, resp_msg, resp_len);
-out:
- ctx->error = err;
- complete(&ctx->comp_event);
-}
-
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
struct hwc_work_request *req)
{
@@ -101,6 +75,40 @@
return err;
}
+static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+ struct hwc_work_request *rx_req)
+{
+ const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
+ struct hwc_caller_ctx *ctx;
+ int err;
+
+ if (!test_bit(resp_msg->response.hwc_msg_id,
+ hwc->inflight_msg_res.map)) {
+ dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+ resp_msg->response.hwc_msg_id);
+ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+ return;
+ }
+
+ ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
+ err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
+ if (err)
+ goto out;
+
+ ctx->status_code = resp_msg->status;
+
+ memcpy(ctx->output_buf, resp_msg, resp_len);
+out:
+ ctx->error = err;
+
+ /* Must post rx wqe before complete(), otherwise the next rx may
+ * hit no_wqe error.
+ */
+ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+
+ complete(&ctx->comp_event);
+}
+
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
@@ -235,14 +243,12 @@
return;
}
- mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
+ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
- /* Do no longer use 'resp', because the buffer is posted to the HW
- * in the below mana_hwc_post_rx_wqe().
+ /* Can no longer use 'resp', because the buffer is posted to the HW
+ * in mana_hwc_handle_resp() above.
*/
resp = NULL;
-
- mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c647033..f2f07bf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -32,7 +32,7 @@
#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
-#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */
struct ionic_dev_bar {
void __iomem *vaddr;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index aa0cc31..86774d9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3220,7 +3220,7 @@
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
- netdev->watchdog_timeo = 2 * HZ;
+ netdev->watchdog_timeo = 5 * HZ;
netif_carrier_off(netdev);
lif->identity = lid;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 3e51b3a..e3451be 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1452,6 +1452,7 @@
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .quirk_10m_link_issue = 1,
.switch_mode = 1,
};
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 0696faf..2e94d10 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1653,7 +1653,7 @@
sock = sockfd_lookup(fd, &err);
if (!sock) {
pr_debug("gtp socket fd=%d not found\n", fd);
- return NULL;
+ return ERR_PTR(err);
}
sk = sock->sk;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 79774c8..8c8880b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -725,22 +725,25 @@
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
- entry->integer.value > num_profiles) {
+ entry->integer.value > num_profiles ||
+ entry->integer.value <
+ rev_data[idx].min_profiles) {
ret = -EINVAL;
goto out_free;
}
- num_profiles = entry->integer.value;
/*
- * this also validates >= min_profiles since we
- * otherwise wouldn't have gotten the data when
- * looking up in ACPI
+	 * Check that the received package count
+	 * matches the max # of profiles
*/
if (wifi_pkg->package.count !=
hdr_size + profile_size * num_profiles) {
ret = -EINVAL;
goto out_free;
}
+
+ /* Number of valid profiles */
+ num_profiles = entry->integer.value;
}
goto read_table;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa57df3..fb2ea38 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -3348,7 +3348,7 @@
{
int ret __maybe_unused = 0;
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ if (!iwl_trans_fw_running(fwrt->trans))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 595fa6d..8ef5ed2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -85,6 +85,10 @@
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
* @time_point: called when transport layer wants to collect debug data
+ * @device_powered_off: called upon resume from hibernation, but not only then.
+ *	The op_mode needs to reset its internal state because the device did not
+ *	survive the system state transition: the firmware is no longer running,
+ *	etc.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -107,6 +111,7 @@
void (*time_point)(struct iwl_op_mode *op_mode,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
+ void (*device_powered_off)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -204,4 +209,11 @@
op_mode->ops->time_point(op_mode, tp_id, tp_data);
}
+static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
+ return;
+ op_mode->ops->device_powered_off(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 6148acb..0ef48ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1128,8 +1128,8 @@
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
- iwl_op_mode_nic_error(trans->op_mode, sync);
trans->state = IWL_TRANS_NO_FW;
+ iwl_op_mode_nic_error(trans->op_mode, sync);
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b4d6505..99a541d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3439,6 +3439,16 @@
mutex_lock(&mvm->mutex);
+	/* Apparently, the device went away and device_powered_off() was called,
+	 * so don't even try to read the rt_status; the device is currently
+	 * inaccessible.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ IWL_INFO(mvm,
+ "Can't resume, device_powered_off() was called during wowlan\n");
+ goto err;
+ }
+
mvm->last_reset_or_resume_time_jiffies = jiffies;
/* get the BSS vif pointer again */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 835a05b9..625ccf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5818,6 +5818,10 @@
int i;
if (!iwl_mvm_has_new_tx_api(mvm)) {
+ /* we can't ask the firmware anything if it is dead */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ return;
if (drop) {
guard(mvm)(mvm);
iwl_mvm_flush_tx_path(mvm,
@@ -5911,8 +5915,11 @@
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
+ * If the firmware is dead, this can't work...
*/
- if (!drop && !iwl_mvm_has_new_tx_api(mvm))
+ if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index b7dcae7..b9daaff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1198,10 +1198,12 @@
struct iwl_mvm *mvm =
container_of(wk, struct iwl_mvm, trig_link_selection_wk);
+ mutex_lock(&mvm->mutex);
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_find_link_selection_vif,
NULL);
+ mutex_unlock(&mvm->mutex);
}
static struct iwl_op_mode *
@@ -1511,6 +1513,8 @@
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ iwl_mvm_pause_tcm(mvm, false);
+
iwl_fw_dbg_stop_sync(&mvm->fwrt);
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
@@ -2090,6 +2094,20 @@
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mvm_stop_device(mvm);
+#ifdef CONFIG_PM
+ mvm->fast_resume = false;
+#endif
+ mutex_unlock(&mvm->mutex);
+}
+
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
.queue_full = iwl_mvm_stop_sw_queue, \
@@ -2102,7 +2120,8 @@
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
.stop = iwl_op_mode_mvm_stop, \
- .time_point = iwl_op_mode_mvm_time_point
+ .time_point = iwl_op_mode_mvm_time_point, \
+ .device_powered_off = iwl_op_mode_mvm_device_powered_off
static const struct iwl_op_mode_ops iwl_mvm_ops = {
IWL_MVM_COMMON_OPS,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 8e0df31..1cc9c42 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -48,6 +48,8 @@
/* Number of iterations on the channel for mei filtered scan */
#define IWL_MEI_SCAN_NUM_ITER 5U
+#define WFA_TPC_IE_LEN 9
+
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
u32 max_out_time;
@@ -303,8 +305,8 @@
max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
- /* we create the 802.11 header and SSID element */
- max_probe_len -= 24 + 2;
+	/* we create the 802.11 header, SSID element and WFA TPC element */
+ max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
/* DS parameter set element is added on 2.4GHZ band if required */
if (iwl_mvm_rrm_scan_needed(mvm))
@@ -731,8 +733,6 @@
return newpos;
}
-#define WFA_TPC_IE_LEN 9
-
static void iwl_mvm_add_tpc_report_ie(u8 *pos)
{
pos[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -837,8 +837,8 @@
return ((n_ssids <= PROBE_OPTION_MAX) &&
(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
(ies->common_ie_len +
- ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] <=
+ ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
+ ies->len[NL80211_BAND_6GHZ] <=
iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
}
@@ -1659,6 +1659,17 @@
cfg->v2.channel_num = channels[i]->hw_value;
if (cfg80211_channel_is_psc(channels[i]))
cfg->flags = 0;
+
+ if (band == NL80211_BAND_6GHZ) {
+ /* 6 GHz channels should only appear in a scan request
+ * that has scan_6ghz set. The only exception is MLO
+ * scan, which has to be passive.
+ */
+ WARN_ON_ONCE(cfg->flags != 0);
+ cfg->flags =
+ cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+ }
+
cfg->v2.iter_count = 1;
cfg->v2.iter_interval = 0;
if (version < 17)
@@ -3168,18 +3179,16 @@
params.n_channels = j;
}
- if (non_psc_included &&
- !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
- kfree(params.channels);
- return -ENOBUFS;
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+ ret = -ENOBUFS;
+ goto out;
}
	uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
-
- if (non_psc_included)
- kfree(params.channels);
- if (uid < 0)
- return uid;
+ if (uid < 0) {
+ ret = uid;
+ goto out;
+ }
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
@@ -3197,6 +3206,9 @@
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
+out:
+ if (non_psc_included)
+ kfree(params.channels);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index e63efbf..ae93a72 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -89,7 +89,8 @@
}
break;
default:
- IWL_ERR(trans, "WRT: Invalid buffer destination\n");
+ IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
+ le32_to_cpu(fw_mon_cfg->buf_location));
}
out:
if (dbg_flags)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 9ad4346..84fd932 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1577,11 +1577,12 @@
return 0;
}
-static int iwl_pci_resume(struct device *device)
+static int _iwl_pci_resume(struct device *device, bool restore)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool device_was_powered_off = false;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -1597,6 +1598,26 @@
if (!trans->op_mode)
return 0;
+ /*
+	 * The scratch value was altered; this means the device was powered off,
+	 * so we need to reset it completely.
+ * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
+ * so assume that any bits there mean that the device is usable.
+ */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
+ !iwl_read32(trans, CSR_FUNC_SCRATCH))
+ device_was_powered_off = true;
+
+ if (restore || device_was_powered_off) {
+ trans->state = IWL_TRANS_NO_FW;
+ /* Hope for the best here ... If one of those steps fails we
+ * won't really know how to recover.
+ */
+ iwl_pcie_prepare_card_hw(trans);
+ iwl_finish_nic_init(trans);
+ iwl_op_mode_device_powered_off(trans->op_mode);
+ }
+
/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return 0;
@@ -1617,9 +1638,23 @@
return 0;
}
+static int iwl_pci_restore(struct device *device)
+{
+ return _iwl_pci_resume(device, true);
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+ return _iwl_pci_resume(device, false);
+}
+
static const struct dev_pm_ops iwl_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
- iwl_pci_resume)
+ .suspend = pm_sleep_ptr(iwl_pci_suspend),
+ .resume = pm_sleep_ptr(iwl_pci_resume),
+ .freeze = pm_sleep_ptr(iwl_pci_suspend),
+ .thaw = pm_sleep_ptr(iwl_pci_resume),
+ .poweroff = pm_sleep_ptr(iwl_pci_suspend),
+ .restore = pm_sleep_ptr(iwl_pci_restore),
};
#define IWL_PM_OPS (&iwl_dev_pm_ops)
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 155eb0f..bf35c92 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -4363,11 +4363,27 @@
if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
- wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
- if (adapter->config_bands & BAND_A)
- wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
- else
+ wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
+ &mwifiex_band_2ghz,
+ sizeof(mwifiex_band_2ghz),
+ GFP_KERNEL);
+ if (!wiphy->bands[NL80211_BAND_2GHZ]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (adapter->config_bands & BAND_A) {
+ wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
+ &mwifiex_band_5ghz,
+ sizeof(mwifiex_band_5ghz),
+ GFP_KERNEL);
+ if (!wiphy->bands[NL80211_BAND_5GHZ]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ } else {
wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ }
if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4461,8 +4477,7 @@
if (ret < 0) {
mwifiex_dbg(adapter, ERROR,
"%s: wiphy_register failed: %d\n", __func__, ret);
- wiphy_free(wiphy);
- return ret;
+ goto err;
}
if (!adapter->regd) {
@@ -4504,4 +4519,9 @@
adapter->wiphy = wiphy;
return ret;
+
+err:
+ wiphy_free(wiphy);
+
+ return ret;
}
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 216d43c..7c04810 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -352,8 +352,11 @@
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
- if (unlikely(!ptr))
+ if (!ptr) {
+ /* No RSN IE is fine in open networks */
+ ret = 0;
goto free_skb;
+ }
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index b19c39d..e2bc673 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1723,6 +1723,11 @@
}
pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
+ if (!dev->poll_mod_count) {
+ nfc_err(dev->dev,
+ "Poll mod list is empty\n");
+ return -EINVAL;
+ }
/* Do not always start polling from the same modulation */
get_random_bytes(&rand_mod, sizeof(rand_mod));
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 389d4ea..ef622d4 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -592,7 +592,7 @@
* This can happen for example on DT systems that do EFI
* booting and may provide a GOP handle to the EFI stub.
*/
- sysfb_disable();
+ sysfb_disable(NULL);
of_platform_device_create(node, NULL, NULL);
of_node_put(node);
}
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 236229f..a9b263f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -58,6 +58,7 @@
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
+#define PARF_INT_ALL_5_MASK 0x2dcc
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -127,6 +128,9 @@
/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+/* PARF_INT_ALL_5_MASK fields */
+#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
@@ -158,10 +162,12 @@
* struct qcom_pcie_ep_cfg - Per SoC config struct
* @hdma_support: HDMA support on this SoC
* @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
*/
struct qcom_pcie_ep_cfg {
bool hdma_support;
bool override_no_snoop;
+ bool disable_mhi_ram_parity_check;
};
/**
@@ -480,6 +486,12 @@
PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
+ if (pcie_ep->cfg && pcie_ep->cfg->disable_mhi_ram_parity_check) {
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ val &= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ }
+
ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
@@ -901,6 +913,7 @@
static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
.hdma_support = true,
.override_no_snoop = true,
+ .disable_mhi_ram_parity_check = true,
};
static const struct of_device_id qcom_pcie_ep_match[] = {
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 0180edf..6f953e3 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -261,6 +261,7 @@
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
bool suspended;
+ bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -1433,7 +1434,7 @@
dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
ret);
}
- } else {
+ } else if (pcie->use_pm_opp) {
freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
if (freq_mbps < 0)
return;
@@ -1592,6 +1593,8 @@
max_freq);
goto err_pm_runtime_put;
}
+
+ pcie->use_pm_opp = true;
} else {
/* Skip ICC init if OPP is supported as it is handled by OPP */
ret = qcom_pcie_icc_init(pcie);
@@ -1683,7 +1686,7 @@
if (ret)
dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
- if (!pcie->icc_mem)
+ if (pcie->use_pm_opp)
dev_pm_opp_set_opp(pcie->pci->dev, NULL);
}
return ret;
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index 0b9a59d5b..adc6394 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -176,7 +176,7 @@
imx_phy->comp_dis_tune =
phy_comp_dis_tune_from_property(imx_phy->comp_dis_tune);
- if (device_property_read_u32(dev, "fsl,pcs-tx-deemph-3p5db-attenuation-db",
+ if (device_property_read_u32(dev, "fsl,phy-pcs-tx-deemph-3p5db-attenuation-db",
&imx_phy->pcs_tx_deemph_3p5db))
imx_phy->pcs_tx_deemph_3p5db = PHY_TUNE_DEFAULT;
else
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 5b36cc7..06cd978 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -1245,8 +1245,8 @@
static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88),
- QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x0d),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0, 0xd4),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1, 0x12),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2, 0xdb),
@@ -1263,6 +1263,7 @@
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_SUMMER_CAL_SPD_MODE, 0x5b),
};
static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_tx_tbl[] = {
@@ -1286,12 +1287,15 @@
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_1, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_2, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x45),
- QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0b),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0a, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0b, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_VGA_CAL_CNTRL1, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x3a, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38, 2),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x39),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0xb3),
@@ -1307,6 +1311,7 @@
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x4b),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76),
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_TX_ADPT_CTRL, 0x10),
};
static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_pcs_tbl[] = {
@@ -1314,6 +1319,8 @@
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_RX_SIGDET_LVL, 0xcc),
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG4, 0x00),
QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG5, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_TX_RX_CONFIG1, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_TX_RX_CONFIG2, 0x02),
};
static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
@@ -1324,11 +1331,13 @@
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_PRE_GAIN, 0x2e),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG1, 0x03),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G3_RXEQEVAL_TIME, 0x27),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_RXEQEVAL_TIME, 0x27),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_TX_RX_CONFIG, 0xc0),
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_POWER_STATE_CONFIG2, 0x1d),
- QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG5, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G3_FOM_EQ_CONFIG5, 0xf2),
- QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG5, 0x18),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G3_FOM_EQ_CONFIG5, 0x7a),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0x8a),
};
static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = {
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index df52b78..9cbf901 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -1745,7 +1745,7 @@
sizeof(*phy_drd->regulators),
GFP_KERNEL);
if (!phy_drd->regulators)
- return ENOMEM;
+ return -ENOMEM;
regulator_bulk_set_supply_names(phy_drd->regulators,
drv_data->regulator_names,
drv_data->n_regulators);
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index cb15041..e657900 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -160,6 +160,24 @@
/* Timeout values */
#define TIMEOUT_US 1000
+/* Lane 0/1/2/3 offset */
+#define DIG_8(n) ((0x4000 * (n)) + 0x1074)
+#define ILL13(n) ((0x4000 * (n)) + 0x1994)
+#define DIG_10(n) ((0x4000 * (n)) + 0x107c)
+#define RST_DLY(n) ((0x4000 * (n)) + 0x19a4)
+#define BYP_15(n) ((0x4000 * (n)) + 0x1038)
+#define BYP_12(n) ((0x4000 * (n)) + 0x102c)
+#define MISC3(n) ((0x4000 * (n)) + 0x19ac)
+#define EQ11(n) ((0x4000 * (n)) + 0x1978)
+
+static u32 save_reg_address[] = {
+ /* Lane 0/1/2/3 Register */
+ DIG_8(0), ILL13(0), DIG_10(0), RST_DLY(0), BYP_15(0), BYP_12(0), MISC3(0), EQ11(0),
+ DIG_8(1), ILL13(1), DIG_10(1), RST_DLY(1), BYP_15(1), BYP_12(1), MISC3(1), EQ11(1),
+ DIG_8(2), ILL13(2), DIG_10(2), RST_DLY(2), BYP_15(2), BYP_12(2), MISC3(2), EQ11(2),
+ DIG_8(3), ILL13(3), DIG_10(3), RST_DLY(3), BYP_15(3), BYP_12(3), MISC3(3), EQ11(3),
+};
+
struct xpsgtr_dev;
/**
@@ -209,6 +227,7 @@
* @tx_term_fix: fix for GT issue
* @saved_icm_cfg0: stored value of ICM CFG0 register
* @saved_icm_cfg1: stored value of ICM CFG1 register
+ * @saved_regs: registers to be saved/restored during suspend/resume
*/
struct xpsgtr_dev {
struct device *dev;
@@ -221,6 +240,7 @@
bool tx_term_fix;
unsigned int saved_icm_cfg0;
unsigned int saved_icm_cfg1;
+ u32 *saved_regs;
};
/*
@@ -294,6 +314,32 @@
writel((readl(addr) & ~clr) | set, addr);
}
+/**
+ * xpsgtr_save_lane_regs - Saves registers on suspend
+ * @gtr_dev: pointer to phy controller context structure
+ */
+static void xpsgtr_save_lane_regs(struct xpsgtr_dev *gtr_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
+ gtr_dev->saved_regs[i] = xpsgtr_read(gtr_dev,
+ save_reg_address[i]);
+}
+
+/**
+ * xpsgtr_restore_lane_regs - Restores registers on resume
+ * @gtr_dev: pointer to phy controller context structure
+ */
+static void xpsgtr_restore_lane_regs(struct xpsgtr_dev *gtr_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_address); i++)
+ xpsgtr_write(gtr_dev, save_reg_address[i],
+ gtr_dev->saved_regs[i]);
+}
+
/*
* Hardware Configuration
*/
@@ -837,6 +883,8 @@
gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
+ xpsgtr_save_lane_regs(gtr_dev);
+
return 0;
}
@@ -847,6 +895,8 @@
unsigned int i;
bool skip_phy_init;
+ xpsgtr_restore_lane_regs(gtr_dev);
+
icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
@@ -994,6 +1044,12 @@
return ret;
}
+ gtr_dev->saved_regs = devm_kmalloc(gtr_dev->dev,
+ sizeof(save_reg_address),
+ GFP_KERNEL);
+ if (!gtr_dev->saved_regs)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index b7921b5..54301fb 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -709,32 +709,35 @@
{
int err, rsel_val;
- if (!pullup && arg == MTK_DISABLE)
- return 0;
-
if (hw->rsel_si_unit) {
/* find pin rsel_index from pin_rsel array*/
err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val);
if (err)
- goto out;
+ return err;
} else {
- if (arg < MTK_PULL_SET_RSEL_000 ||
- arg > MTK_PULL_SET_RSEL_111) {
- err = -EINVAL;
- goto out;
- }
+ if (arg < MTK_PULL_SET_RSEL_000 || arg > MTK_PULL_SET_RSEL_111)
+ return -EINVAL;
rsel_val = arg - MTK_PULL_SET_RSEL_000;
}
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
- if (err)
- goto out;
+ return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_RSEL, rsel_val);
+}
- err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, MTK_ENABLE);
+static int mtk_pinconf_bias_set_pu_pd_rsel(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 pullup, u32 arg)
+{
+ u32 enable = arg == MTK_DISABLE ? MTK_DISABLE : MTK_ENABLE;
+ int err;
-out:
- return err;
+ if (arg != MTK_DISABLE) {
+ err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
+ if (err)
+ return err;
+ }
+
+ return mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, enable);
}
int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
@@ -750,22 +753,22 @@
try_all_type = MTK_PULL_TYPE_MASK;
if (try_all_type & MTK_PULL_RSEL_TYPE) {
- err = mtk_pinconf_bias_set_rsel(hw, desc, pullup, arg);
+ err = mtk_pinconf_bias_set_pu_pd_rsel(hw, desc, pullup, arg);
if (!err)
- return err;
+ return 0;
}
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
err = mtk_pinconf_bias_set_pu_pd(hw, desc, pullup, arg);
if (!err)
- return err;
+ return 0;
}
if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
err = mtk_pinconf_bias_set_pullsel_pullen(hw, desc,
pullup, arg);
if (!err)
- return err;
+ return 0;
}
if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
@@ -803,9 +806,9 @@
return 0;
}
-static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw,
- const struct mtk_pin_desc *desc,
- u32 *pullup, u32 *enable)
+static int mtk_pinconf_bias_get_pu_pd_rsel(struct mtk_pinctrl *hw,
+ const struct mtk_pin_desc *desc,
+ u32 *pullup, u32 *enable)
{
int pu, pd, rsel, err;
@@ -939,22 +942,22 @@
try_all_type = MTK_PULL_TYPE_MASK;
if (try_all_type & MTK_PULL_RSEL_TYPE) {
- err = mtk_pinconf_bias_get_rsel(hw, desc, pullup, enable);
+ err = mtk_pinconf_bias_get_pu_pd_rsel(hw, desc, pullup, enable);
if (!err)
- return err;
+ return 0;
}
if (try_all_type & MTK_PULL_PU_PD_TYPE) {
err = mtk_pinconf_bias_get_pu_pd(hw, desc, pullup, enable);
if (!err)
- return err;
+ return 0;
}
if (try_all_type & MTK_PULL_PULLSEL_TYPE) {
err = mtk_pinconf_bias_get_pullsel_pullen(hw, desc,
pullup, enable);
if (!err)
- return err;
+ return 0;
}
if (try_all_type & MTK_PULL_PUPD_R1R0_TYPE)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index b3c3f5f..93ab277 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1403,8 +1403,11 @@
/* We will handle a range of GPIO pins */
for (i = 0; i < gpio_banks; i++)
- if (gpio_chips[i])
+ if (gpio_chips[i]) {
pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ gpiochip_add_pin_range(&gpio_chips[i]->chip, dev_name(info->pctl->dev), 0,
+ gpio_chips[i]->range.pin_base, gpio_chips[i]->range.npins);
+ }
dev_info(dev, "initialized AT91 pinctrl driver\n");
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 0eacaf1..6878bc8 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3795,7 +3795,7 @@
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0,
- 0,
+ IOMUX_WIDTH_2BIT,
IOMUX_WIDTH_3BIT,
0),
PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3",
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 4c6bfab..4da3c3f 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -345,6 +345,8 @@
return -ENOTSUPP;
fselector = setting->func;
function = pinmux_generic_get_function(pctldev, fselector);
+ if (!function)
+ return -EINVAL;
*func = function->data;
if (!(*func)) {
dev_err(pcs->dev, "%s could not find function%i\n",
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index e30e938..65ed933 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1805,26 +1805,29 @@
[235] = PINGROUP(235, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
[236] = PINGROUP(236, aon_cci, qdss_gpio, _, _, _, _, _, _, _),
[237] = PINGROUP(237, _, _, _, _, _, _, _, _, _),
- [238] = UFS_RESET(ufs_reset, 0x1f9000),
- [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1f2000, 14, 6),
- [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1f2000, 11, 3),
- [241] = SDC_QDSD_PINGROUP(sdc2_data, 0x1f2000, 9, 0),
+ [238] = UFS_RESET(ufs_reset, 0xf9000),
+ [239] = SDC_QDSD_PINGROUP(sdc2_clk, 0xf2000, 14, 6),
+ [240] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xf2000, 11, 3),
+ [241] = SDC_QDSD_PINGROUP(sdc2_data, 0xf2000, 9, 0),
};
static const struct msm_gpio_wakeirq_map x1e80100_pdc_map[] = {
{ 0, 72 }, { 2, 70 }, { 3, 71 }, { 6, 123 }, { 7, 67 }, { 11, 85 },
- { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 }, { 26, 129 },
- { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 }, { 32, 145 }, { 33, 115 },
- { 34, 130 }, { 35, 146 }, { 36, 147 }, { 39, 80 }, { 43, 148 }, { 47, 149 },
- { 51, 79 }, { 53, 89 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 },
- { 67, 88 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 },
- { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 }, { 94, 140 },
- { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 }, { 129, 76 }, { 131, 82 },
- { 134, 83 }, { 141, 93 }, { 144, 94 }, { 147, 96 }, { 148, 97 }, { 150, 102 },
- { 151, 103 }, { 153, 104 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 },
- { 172, 99 }, { 181, 101 }, { 184, 116 }, { 193, 40 }, { 193, 117 }, { 196, 108 },
- { 203, 133 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 },
- { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 },
+ { 13, 86 }, { 15, 68 }, { 18, 122 }, { 19, 69 }, { 21, 158 }, { 23, 143 },
+ { 24, 126 }, { 26, 129 }, { 27, 144 }, { 28, 77 }, { 29, 78 }, { 30, 92 },
+ { 31, 159 }, { 32, 145 }, { 33, 115 }, { 34, 130 }, { 35, 146 }, { 36, 147 },
+ { 38, 113 }, { 39, 80 }, { 43, 148 }, { 47, 149 }, { 51, 79 }, { 53, 89 },
+ { 55, 81 }, { 59, 87 }, { 64, 90 }, { 65, 106 }, { 66, 142 }, { 67, 88 },
+ { 68, 151 }, { 71, 91 }, { 75, 152 }, { 79, 153 }, { 80, 125 }, { 81, 128 },
+ { 83, 154 }, { 84, 137 }, { 85, 155 }, { 87, 156 }, { 91, 157 }, { 92, 138 },
+ { 93, 139 }, { 94, 140 }, { 95, 141 }, { 113, 84 }, { 121, 73 }, { 123, 74 },
+ { 125, 75 }, { 129, 76 }, { 131, 82 }, { 134, 83 }, { 141, 93 }, { 144, 94 },
+ { 145, 95 }, { 147, 96 }, { 148, 97 }, { 150, 102 }, { 151, 103 }, { 153, 104 },
+ { 154, 100 }, { 156, 105 }, { 157, 107 }, { 163, 98 }, { 166, 112 }, { 172, 99 },
+ { 175, 114 }, { 181, 101 }, { 184, 116 }, { 193, 117 }, { 196, 108 }, { 203, 133 },
+ { 208, 134 }, { 212, 120 }, { 213, 150 }, { 214, 121 }, { 215, 118 }, { 217, 109 },
+ { 219, 119 }, { 220, 110 }, { 221, 111 }, { 222, 124 }, { 224, 131 }, { 225, 132 },
+ { 228, 135 }, { 230, 136 }, { 232, 162 },
};
static const struct msm_pinctrl_soc_data x1e80100_pinctrl = {
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 4ce080c..1d0d6c2 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -793,12 +793,12 @@
case IRQ_TYPE_LEVEL_HIGH:
irq_type = 0; /* 0: level triggered */
edge_both = 0; /* 0: ignored */
- polarity = mask; /* 1: high level */
+ polarity = 0; /* 0: high level */
break;
case IRQ_TYPE_LEVEL_LOW:
irq_type = 0; /* 0: level triggered */
edge_both = 0; /* 0: ignored */
- polarity = 0; /* 0: low level */
+ polarity = mask; /* 1: low level */
break;
default:
return -EINVAL;
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index c3e51f0..bbb8edb 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -359,6 +359,7 @@
dev->smu_msg = 0x538;
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
dev->num_ips = 22;
dev->s2d_msg_id = 0xDE;
dev->smu_msg = 0x938;
@@ -597,6 +598,7 @@
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
break;
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_1AH);
break;
default:
@@ -630,6 +632,7 @@
case AMD_CPU_ID_CB:
case AMD_CPU_ID_PS:
case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
+ case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
return true;
default:
return false;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index fceffe2..ed3633c 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -145,6 +145,10 @@
.wmi_ignore_fan = true,
};
+static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
+ .ignore_key_wlan = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -516,6 +520,15 @@
},
.driver_data = &quirk_asus_ignore_fan,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS Zenbook Duo UX8406MA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX8406MA"),
+ },
+ .driver_data = &quirk_asus_zenbook_duo_kbd,
+ },
{},
};
@@ -630,7 +643,12 @@
case 0x32: /* Volume Mute */
if (atkbd_reports_vol_keys)
*code = ASUS_WMI_KEY_IGNORE;
-
+ break;
+ case 0x5D: /* Wireless console Toggle */
+ case 0x5E: /* Wireless console Enable */
+ case 0x5F: /* Wireless console Disable */
+ if (quirks->ignore_key_wlan)
+ *code = ASUS_WMI_KEY_IGNORE;
break;
}
}
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index cc30f18..d02f15f 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -40,6 +40,7 @@
bool wmi_force_als_set;
bool wmi_ignore_fan;
bool filter_i8042_e1_extended_codes;
+ bool ignore_key_wlan;
enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 141a2d2..387dd09 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -140,7 +140,6 @@
/* Lenovo Yoga Tab 3 Pro YT3-X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)&lenovo_yt3_info,
diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c
index d786cbf..7008794 100644
--- a/drivers/power/sequencing/pwrseq-qcom-wcn.c
+++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c
@@ -288,6 +288,13 @@
return dev_err_probe(dev, PTR_ERR(ctx->wlan_gpio),
"Failed to get the WLAN enable GPIO\n");
+ /*
+ * Set direction to output but keep the current value in order to not
+ * disable the WLAN module accidentally if it's already powered on.
+ */
+ gpiod_direction_output(ctx->wlan_gpio,
+ gpiod_get_value_cansleep(ctx->wlan_gpio));
+
ctx->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(ctx->clk))
return dev_err_probe(dev, PTR_ERR(ctx->clk),
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index 49bef4a..8b3df3e 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -1387,12 +1387,16 @@
"failed to register wireless charing power supply\n");
}
- battmgr->client = devm_pmic_glink_register_client(dev,
- PMIC_GLINK_OWNER_BATTMGR,
- qcom_battmgr_callback,
- qcom_battmgr_pdr_notify,
- battmgr);
- return PTR_ERR_OR_ZERO(battmgr->client);
+ battmgr->client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_BATTMGR,
+ qcom_battmgr_callback,
+ qcom_battmgr_pdr_notify,
+ battmgr);
+ if (IS_ERR(battmgr->client))
+ return PTR_ERR(battmgr->client);
+
+ pmic_glink_client_register(battmgr->client);
+
+ return 0;
}
static const struct auxiliary_device_id qcom_battmgr_id_table[] = {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index bd99c54..0f64b02 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -642,6 +642,7 @@
if (aac_comm_init(dev)<0){
kfree(dev->queues);
+ dev->queues = NULL;
return NULL;
}
/*
@@ -649,6 +650,7 @@
*/
if (aac_fib_setup(dev) < 0) {
kfree(dev->queues);
+ dev->queues = NULL;
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 4156419..4756a3f 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5410,7 +5410,7 @@
struct get_cgnbuf_info_req *cgnbuf_req;
struct lpfc_cgn_info *cp;
uint8_t *cgn_buff;
- int size, cinfosz;
+ size_t size, cinfosz;
int rc = 0;
if (job->request_len < sizeof(struct fc_bsg_request) +
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index dad3991..9db8694 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1823,13 +1823,15 @@
(sshdr.asc == 0x74 && sshdr.ascq == 0x71)) /* drive is password locked */
/* this is no error here */
return 0;
+
/*
- * This drive doesn't support sync and there's not much
- * we can do because this is called during shutdown
- * or suspend so just return success so those operations
- * can proceed.
+ * If a format is in progress or if the drive does not
+ * support sync, there is not much we can do because
+		 * this is called during shutdown or suspend; just
+ * return success so those operations can proceed.
*/
- if (sshdr.sense_key == ILLEGAL_REQUEST)
+ if ((sshdr.asc == 0x04 && sshdr.ascq == 0x04) ||
+ sshdr.sense_key == ILLEGAL_REQUEST)
return 0;
}
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 7f02f05..74b9121 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -77,7 +77,7 @@
select QCOM_QMI_HELPERS
select QCOM_PDR_MSG
select AUXILIARY_BUS
- depends on NET && QRTR
+ depends on NET && QRTR && (ARCH_QCOM || COMPILE_TEST)
default QCOM_RPROC_COMMON
help
The Protection Domain Mapper maps registered services to the domains
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index d845726..ae66c26 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -349,7 +349,7 @@
return -EINVAL;
}
- cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WC);
if (!cmd_db_header) {
ret = -ENOMEM;
cmd_db_header = NULL;
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 9ebc0ba..96062229 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -66,15 +66,14 @@
spin_unlock_irqrestore(&pg->client_lock, flags);
}
-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
- unsigned int id,
- void (*cb)(const void *, size_t, void *),
- void (*pdr)(void *, int),
- void *priv)
+struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv)
{
struct pmic_glink_client *client;
struct pmic_glink *pg = dev_get_drvdata(dev->parent);
- unsigned long flags;
client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL);
if (!client)
@@ -85,6 +84,18 @@
client->cb = cb;
client->pdr_notify = pdr;
client->priv = priv;
+ INIT_LIST_HEAD(&client->node);
+
+ devres_add(dev, client);
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(devm_pmic_glink_client_alloc);
+
+void pmic_glink_client_register(struct pmic_glink_client *client)
+{
+ struct pmic_glink *pg = client->pg;
+ unsigned long flags;
mutex_lock(&pg->state_lock);
spin_lock_irqsave(&pg->client_lock, flags);
@@ -95,17 +106,22 @@
spin_unlock_irqrestore(&pg->client_lock, flags);
mutex_unlock(&pg->state_lock);
- devres_add(dev, client);
-
- return client;
}
-EXPORT_SYMBOL_GPL(devm_pmic_glink_register_client);
+EXPORT_SYMBOL_GPL(pmic_glink_client_register);
int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len)
{
struct pmic_glink *pg = client->pg;
+ int ret;
- return rpmsg_send(pg->ept, data, len);
+ mutex_lock(&pg->state_lock);
+ if (!pg->ept)
+ ret = -ECONNRESET;
+ else
+ ret = rpmsg_send(pg->ept, data, len);
+ mutex_unlock(&pg->state_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pmic_glink_send);
@@ -175,7 +191,7 @@
if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
new_state = SERVREG_SERVICE_STATE_UP;
} else {
- if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
+ if (pg->pdr_state == SERVREG_SERVICE_STATE_DOWN || !pg->ept)
new_state = SERVREG_SERVICE_STATE_DOWN;
}
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 1e0808b..463b1c5 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -520,12 +520,17 @@
return ret;
}
- altmode->client = devm_pmic_glink_register_client(dev,
- altmode->owner_id,
- pmic_glink_altmode_callback,
- pmic_glink_altmode_pdr_notify,
- altmode);
- return PTR_ERR_OR_ZERO(altmode->client);
+ altmode->client = devm_pmic_glink_client_alloc(dev,
+ altmode->owner_id,
+ pmic_glink_altmode_callback,
+ pmic_glink_altmode_pdr_notify,
+ altmode);
+ if (IS_ERR(altmode->client))
+ return PTR_ERR(altmode->client);
+
+ pmic_glink_client_register(altmode->client);
+
+ return 0;
}
static const struct auxiliary_device_id pmic_glink_altmode_id_table[] = {
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index a4c0070..2228595 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -517,7 +517,7 @@
NULL,
};
-static const struct of_device_id qcom_pdm_domains[] = {
+static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,apq8064", .data = NULL, },
{ .compatible = "qcom,apq8074", .data = NULL, },
{ .compatible = "qcom,apq8084", .data = NULL, },
@@ -635,6 +635,8 @@
ret = PTR_ERR(data);
else
__qcom_pdm_data = data;
+ } else {
+ refcount_inc(&__qcom_pdm_data->refcnt);
}
auxiliary_set_drvdata(auxdev, __qcom_pdm_data);
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 7aa4900..f275143 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1291,18 +1291,18 @@
unsigned int port_num)
{
struct sdw_dpn_prop *dpn_prop;
- u8 num_ports;
+ unsigned long mask;
int i;
if (direction == SDW_DATA_DIR_TX) {
- num_ports = hweight32(slave->prop.source_ports);
+ mask = slave->prop.source_ports;
dpn_prop = slave->prop.src_dpn_prop;
} else {
- num_ports = hweight32(slave->prop.sink_ports);
+ mask = slave->prop.sink_ports;
dpn_prop = slave->prop.sink_dpn_prop;
}
- for (i = 0; i < num_ports; i++) {
+ for_each_set_bit(i, &mask, 32) {
if (dpn_prop[i].num == port_num)
return &dpn_prop[i];
}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index dbee6f0..84887df 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -811,6 +811,7 @@
* generate Missed Service Error Event.
* Set skip flag when receive a Missed Service Error Event and
* process the missed tds on the endpoint ring.
+ * @wa1_nop_trb: holds a pointer to the NOP TRB.
*/
struct cdnsp_ep {
struct usb_ep endpoint;
@@ -838,6 +839,8 @@
#define EP_UNCONFIGURED BIT(7)
bool skip;
+ union cdnsp_trb *wa1_nop_trb;
+
};
/**
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 02f297f..dbd83d3 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -402,7 +402,7 @@
struct cdnsp_stream_ctx *st_ctx;
struct cdnsp_ep *pep;
- pep = &pdev->eps[stream_id];
+ pep = &pdev->eps[ep_index];
if (pep->ep_state & EP_HAS_STREAMS) {
st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
@@ -1905,6 +1905,23 @@
return ret;
/*
+	 * workaround 1: a STOP EP command on a LINK TRB with the TC bit set to 1
+	 * can leave the internal cycle bit in an incorrect state after the
+	 * command completes. As a consequence, an empty transfer ring can be
+	 * incorrectly detected when the EP is resumed.
+	 * A NOP TRB before the LINK TRB avoids this scenario: the STOP EP command
+	 * then lands on the NOP TRB, so the internal cycle bit is not changed and
+	 * keeps its correct value.
+ */
+ if (pep->wa1_nop_trb) {
+ field = le32_to_cpu(pep->wa1_nop_trb->trans_event.flags);
+ field ^= TRB_CYCLE;
+
+ pep->wa1_nop_trb->trans_event.flags = cpu_to_le32(field);
+ pep->wa1_nop_trb = NULL;
+ }
+
+ /*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
@@ -1999,6 +2016,17 @@
send_addr = addr;
}
+ if (cdnsp_trb_is_link(ring->enqueue + 1)) {
+ field = TRB_TYPE(TRB_TR_NOOP) | TRB_IOC;
+ if (!ring->cycle_state)
+ field |= TRB_CYCLE;
+
+ pep->wa1_nop_trb = ring->enqueue;
+
+ cdnsp_queue_trb(pdev, ring, 0, 0x0, 0x0,
+ TRB_INTR_TARGET(0), field);
+ }
+
cdnsp_check_trb_math(preq, enqd_len);
ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
start_cycle, start_trb);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0e7439d..0c1b69d 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1761,6 +1761,9 @@
{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
.driver_info = SINGLE_RX_URB,
},
+ { USB_DEVICE(0x1901, 0x0006), /* GE Healthcare Patient Monitor UI Controller */
+ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
+ },
{ USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d83231d..61b6d97 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -670,6 +670,7 @@
static void remove_power_attributes(struct device *dev)
{
+ sysfs_unmerge_group(&dev->kobj, &usb3_hardware_lpm_attr_group);
sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
sysfs_unmerge_group(&dev->kobj, &power_attr_group);
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 734de2a..ccc3895 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -564,9 +564,17 @@
void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
+ u32 reg;
if (!dwc->ev_buf)
return;
+ /*
+	 * Exynos platforms may not be able to access the event buffer if the
+ * controller failed to halt on dwc3_core_exit().
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_DEVCTRLHLT))
+ return;
evt = dwc->ev_buf;
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index d5c77db..2a11fc0 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -522,11 +522,13 @@
if (ret) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
- goto err1;
+ goto err2;
}
dwc3_omap_enable_irqs(omap);
return 0;
+err2:
+ of_platform_depopulate(dev);
err1:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 211360e..c8c7cd0 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -219,10 +219,8 @@
dwc3_data->regmap = regmap;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg");
- if (!res) {
- ret = -ENXIO;
- goto undo_platform_dev_alloc;
- }
+ if (!res)
+ return -ENXIO;
dwc3_data->syscfg_reg_off = res->start;
@@ -233,8 +231,7 @@
devm_reset_control_get_exclusive(dev, "powerdown");
if (IS_ERR(dwc3_data->rstc_pwrdn)) {
dev_err(&pdev->dev, "could not get power controller\n");
- ret = PTR_ERR(dwc3_data->rstc_pwrdn);
- goto undo_platform_dev_alloc;
+ return PTR_ERR(dwc3_data->rstc_pwrdn);
}
/* Manage PowerDown */
@@ -269,7 +266,7 @@
if (!child_pdev) {
dev_err(dev, "failed to find dwc3 core device\n");
ret = -ENODEV;
- goto err_node_put;
+ goto depopulate;
}
dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
@@ -285,6 +282,7 @@
ret = st_dwc3_drd_init(dwc3_data);
if (ret) {
dev_err(dev, "drd initialisation failed\n");
+ of_platform_depopulate(dev);
goto undo_softreset;
}
@@ -294,14 +292,14 @@
platform_set_drvdata(pdev, dwc3_data);
return 0;
+depopulate:
+ of_platform_depopulate(dev);
err_node_put:
of_node_put(child);
undo_softreset:
reset_control_assert(dwc3_data->rstc_rst);
undo_powerdown:
reset_control_assert(dwc3_data->rstc_pwrdn);
-undo_platform_dev_alloc:
- platform_device_put(pdev);
return ret;
}
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index bb4d894..f1298b1 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -327,9 +327,14 @@
goto err_pm_set_suspended;
pm_suspend_ignore_children(dev, false);
- return pm_runtime_resume_and_get(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto err_pm_set_suspended;
+
+ return 0;
err_pm_set_suspended:
+ of_platform_depopulate(dev);
pm_runtime_set_suspended(dev);
err_clk_put:
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index d96ffbe..c9533a9 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -232,7 +232,8 @@
/* stall is always issued on EP0 */
dep = dwc->eps[0];
__dwc3_gadget_ep_set_halt(dep, 1, false);
- dep->flags = DWC3_EP_ENABLED;
+ dep->flags &= DWC3_EP_RESOURCE_ALLOCATED;
+ dep->flags |= DWC3_EP_ENABLED;
dwc->delayed_status = false;
if (!list_empty(&dep->pending_list)) {
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index d41f5f3..a9edd60 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -753,6 +753,7 @@
video->req_int_count = 0;
uvc_video_ep_queue_initial_requests(video);
+ queue_work(video->async_wq, &video->pump);
return ret;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 311040f..176f387 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -619,6 +619,8 @@
/* MeiG Smart Technology products */
#define MEIGSMART_VENDOR_ID 0x2dee
+/* MeiG Smart SRM825L based on Qualcomm 315 */
+#define MEIGSMART_PRODUCT_SRM825L 0x4d22
/* MeiG Smart SLM320 based on UNISOC UIS8910 */
#define MEIGSMART_PRODUCT_SLM320 0x4d41
@@ -2366,6 +2368,9 @@
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
index cd23533..f71dba8 100644
--- a/drivers/usb/typec/mux/fsa4480.c
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -274,7 +274,7 @@
return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n");
ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val);
- if (ret || !val)
+ if (ret)
return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n");
dev_dbg(dev, "Found FSA4480 v%lu.%lu (Vendor ID = %lu)\n",
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 16c3284..6aace19 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -68,6 +68,9 @@
struct work_struct notify_work;
struct work_struct register_work;
+ spinlock_t state_lock;
+ bool ucsi_registered;
+ bool pd_running;
u8 read_buf[UCSI_BUF_SIZE];
};
@@ -244,8 +247,20 @@
static void pmic_glink_ucsi_register(struct work_struct *work)
{
struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
+ unsigned long flags;
+ bool pd_running;
- ucsi_register(ucsi->ucsi);
+ spin_lock_irqsave(&ucsi->state_lock, flags);
+ pd_running = ucsi->pd_running;
+ spin_unlock_irqrestore(&ucsi->state_lock, flags);
+
+ if (!ucsi->ucsi_registered && pd_running) {
+ ucsi_register(ucsi->ucsi);
+ ucsi->ucsi_registered = true;
+ } else if (ucsi->ucsi_registered && !pd_running) {
+ ucsi_unregister(ucsi->ucsi);
+ ucsi->ucsi_registered = false;
+ }
}
static void pmic_glink_ucsi_callback(const void *data, size_t len, void *priv)
@@ -269,11 +284,12 @@
static void pmic_glink_ucsi_pdr_notify(void *priv, int state)
{
struct pmic_glink_ucsi *ucsi = priv;
+ unsigned long flags;
- if (state == SERVREG_SERVICE_STATE_UP)
- schedule_work(&ucsi->register_work);
- else if (state == SERVREG_SERVICE_STATE_DOWN)
- ucsi_unregister(ucsi->ucsi);
+ spin_lock_irqsave(&ucsi->state_lock, flags);
+ ucsi->pd_running = (state == SERVREG_SERVICE_STATE_UP);
+ spin_unlock_irqrestore(&ucsi->state_lock, flags);
+ schedule_work(&ucsi->register_work);
}
static void pmic_glink_ucsi_destroy(void *data)
@@ -320,6 +336,7 @@
INIT_WORK(&ucsi->register_work, pmic_glink_ucsi_register);
init_completion(&ucsi->read_ack);
init_completion(&ucsi->write_ack);
+ spin_lock_init(&ucsi->state_lock);
mutex_init(&ucsi->lock);
ucsi->ucsi = ucsi_create(dev, &pmic_glink_ucsi_ops);
@@ -367,12 +384,16 @@
ucsi->port_orientation[port] = desc;
}
- ucsi->client = devm_pmic_glink_register_client(dev,
- PMIC_GLINK_OWNER_USBC,
- pmic_glink_ucsi_callback,
- pmic_glink_ucsi_pdr_notify,
- ucsi);
- return PTR_ERR_OR_ZERO(ucsi->client);
+ ucsi->client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_USBC,
+ pmic_glink_ucsi_callback,
+ pmic_glink_ucsi_pdr_notify,
+ ucsi);
+ if (IS_ERR(ucsi->client))
+ return PTR_ERR(ucsi->client);
+
+ pmic_glink_client_register(ucsi->client);
+
+ return 0;
}
static void pmic_glink_ucsi_remove(struct auxiliary_device *adev)
diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c
index 561be8f..2b5a1e6 100644
--- a/drivers/video/aperture.c
+++ b/drivers/video/aperture.c
@@ -293,7 +293,7 @@
* ask for this, so let's assume that a real driver for the display
* was already probed and prevent sysfb from registering devices later.
*/
- sysfb_disable();
+ sysfb_disable(NULL);
aperture_detach_devices(base, size);
@@ -346,15 +346,10 @@
*/
int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
{
- bool primary = false;
resource_size_t base, size;
int bar, ret = 0;
- if (pdev == vga_default_device())
- primary = true;
-
- if (primary)
- sysfb_disable();
+ sysfb_disable(&pdev->dev);
for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
@@ -370,7 +365,7 @@
* that consumes the VGA framebuffer I/O range. Remove this
* device as well.
*/
- if (primary)
+ if (pdev == vga_default_device())
ret = __aperture_remove_legacy_vga_devices(pdev);
return ret;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 3acf5e0..a95e776 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -695,13 +695,18 @@
{
struct afs_vnode_param *vp = &op->file[0];
struct afs_vnode *vnode = vp->vnode;
+ struct inode *inode = &vnode->netfs.inode;
if (op->setattr.attr->ia_valid & ATTR_SIZE) {
loff_t size = op->setattr.attr->ia_size;
- loff_t i_size = op->setattr.old_i_size;
+ loff_t old = op->setattr.old_i_size;
- if (size != i_size) {
- truncate_setsize(&vnode->netfs.inode, size);
+ /* Note: inode->i_size was updated by afs_apply_status() inside
+ * the I/O and callback locks.
+ */
+
+ if (size != old) {
+ truncate_pagecache(inode, size);
netfs_resize_file(&vnode->netfs, size, true);
fscache_resize_cookie(afs_vnode_cache(vnode), size);
}
diff --git a/fs/attr.c b/fs/attr.c
index 825007d..c04d19b 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -487,9 +487,17 @@
error = security_inode_setattr(idmap, dentry, attr);
if (error)
return error;
- error = try_break_deleg(inode, delegated_inode);
- if (error)
- return error;
+
+ /*
+ * If ATTR_DELEG is set, then these attributes are being set on
+ * behalf of the holder of a write delegation. We want to avoid
+ * breaking the delegation in this case.
+ */
+ if (!(ia_valid & ATTR_DELEG)) {
+ error = try_break_deleg(inode, delegated_inode);
+ if (error)
+ return error;
+ }
if (inode->i_op->setattr)
error = inode->i_op->setattr(idmap, dentry, attr);
diff --git a/fs/backing-file.c b/fs/backing-file.c
index afb5574..8860dac 100644
--- a/fs/backing-file.c
+++ b/fs/backing-file.c
@@ -303,13 +303,16 @@
if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING)))
return -EIO;
+ if (!out->f_op->splice_write)
+ return -EINVAL;
+
ret = file_remove_privs(ctx->user_file);
if (ret)
return ret;
old_cred = override_creds(ctx->cred);
file_start_write(out);
- ret = iter_file_splice_write(pipe, out, ppos, len, flags);
+ ret = out->f_op->splice_write(pipe, out, ppos, len, flags);
file_end_write(out);
revert_creds(old_cred);
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 28a3439..4fe5bb9 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -589,6 +589,9 @@
if (bprm->have_execfd)
nitems++;
+#ifdef ELF_HWCAP2
+ nitems++;
+#endif
csp = sp;
sp -= nitems * 2 * sizeof(unsigned long);
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index f04d931..b4e31ae 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -668,7 +668,6 @@
{
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = bbio->fs_info;
- struct btrfs_bio *orig_bbio = bbio;
struct bio *bio = &bbio->bio;
u64 logical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
u64 length = bio->bi_iter.bi_size;
@@ -706,7 +705,7 @@
bbio->saved_iter = bio->bi_iter;
ret = btrfs_lookup_bio_sums(bbio);
if (ret)
- goto fail_put_bio;
+ goto fail;
}
if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
@@ -740,13 +739,13 @@
ret = btrfs_bio_csum(bbio);
if (ret)
- goto fail_put_bio;
+ goto fail;
} else if (use_append ||
(btrfs_is_zoned(fs_info) && inode &&
inode->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_alloc_dummy_sum(bbio);
if (ret)
- goto fail_put_bio;
+ goto fail;
}
}
@@ -754,12 +753,23 @@
done:
return map_length == length;
-fail_put_bio:
- if (map_length < length)
- btrfs_cleanup_bio(bbio);
fail:
btrfs_bio_counter_dec(fs_info);
- btrfs_bio_end_io(orig_bbio, ret);
+ /*
+ * We have split the original bbio; now we have to end both the current
+ * @bbio and the remaining one, as the remaining one will never be submitted.
+ */
+ if (map_length < length) {
+ struct btrfs_bio *remaining = bbio->private;
+
+ ASSERT(bbio->bio.bi_pool == &btrfs_clone_bioset);
+ ASSERT(remaining);
+
+ remaining->bio.bi_status = ret;
+ btrfs_orig_bbio_end_io(remaining);
+ }
+ bbio->bio.bi_status = ret;
+ btrfs_orig_bbio_end_io(bbio);
/* Do not submit another chunk */
return true;
}
diff --git a/fs/btrfs/fiemap.c b/fs/btrfs/fiemap.c
index 8f95f3e..df7f09f 100644
--- a/fs/btrfs/fiemap.c
+++ b/fs/btrfs/fiemap.c
@@ -637,7 +637,7 @@
struct btrfs_path *path;
struct fiemap_cache cache = { 0 };
struct btrfs_backref_share_check_ctx *backref_ctx;
- u64 last_extent_end;
+ u64 last_extent_end = 0;
u64 prev_extent_end;
u64 range_start;
u64 range_end;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5d57a28..7d6f5d9 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -4185,6 +4185,8 @@
return 0;
}
+ btrfs_run_delayed_iputs(root->fs_info);
+ btrfs_wait_on_delayed_iputs(root->fs_info);
ret = btrfs_start_delalloc_snapshot(root, true);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 68e14fd..c691784b 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1985,8 +1985,8 @@
return unalloc < data_chunk_size;
}
-static int do_reclaim_sweep(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info, int raid)
+static void do_reclaim_sweep(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info, int raid)
{
struct btrfs_block_group *bg;
int thresh_pct;
@@ -2031,7 +2031,6 @@
}
up_read(&space_info->groups_sem);
- return 0;
}
void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
@@ -2074,21 +2073,15 @@
return ret;
}
-int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
+void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info)
{
- int ret;
int raid;
struct btrfs_space_info *space_info;
list_for_each_entry(space_info, &fs_info->space_info, list) {
if (!btrfs_should_periodic_reclaim(space_info))
continue;
- for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
- ret = do_reclaim_sweep(fs_info, space_info, raid);
- if (ret)
- return ret;
- }
+ for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
+ do_reclaim_sweep(fs_info, space_info, raid);
}
-
- return ret;
}
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 88b4422..5602026 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -294,6 +294,6 @@
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready);
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info);
int btrfs_calc_reclaim_threshold(struct btrfs_space_info *space_info);
-int btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
+void btrfs_reclaim_sweep(struct btrfs_fs_info *fs_info);
#endif /* BTRFS_SPACE_INFO_H */
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 71cd705..4a8eec4 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -695,6 +695,7 @@
percpu_counter_dec(&mdsc->metric.total_inodes);
+ netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
if (inode->i_state & I_PINNING_NETFS_WB)
ceph_fscache_unuse_cookie(inode, true);
diff --git a/fs/dcache.c b/fs/dcache.c
index 3d8daae..6386b9b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -96,11 +96,16 @@
*
* This hash-function tries to avoid losing too many bits of hash
* information, yet avoid using a prime hash-size or similar.
+ *
+ * Marking the variables "used" ensures that the compiler doesn't
+ * optimize them away completely on architectures with runtime
+ * constant infrastructure; this allows debuggers to see their
+ * values. But updating these values has no effect on those arches.
*/
-static unsigned int d_hash_shift __ro_after_init;
+static unsigned int d_hash_shift __ro_after_init __used;
-static struct hlist_bl_head *dentry_hashtable __ro_after_init;
+static struct hlist_bl_head *dentry_hashtable __ro_after_init __used;
static inline struct hlist_bl_head *d_hash(unsigned long hashlen)
{
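(A side note on the fs/dcache.c hunk above: the __used marker is the kernel's wrapper for the compiler attribute illustrated by the minimal userspace sketch below; the variable name and value are purely illustrative and are not taken from the patch.)

    /*
     * In the kernel headers __used expands to __attribute__((__used__)); the
     * attribute forces an otherwise unreferenced static object to be emitted,
     * so a debugger can still read its value, as the comment above describes.
     */
    #include <stdio.h>

    #define __used __attribute__((__used__))

    static unsigned int example_hash_shift __used = 13;  /* hypothetical value */

    int main(void)
    {
            /* Nothing references example_hash_shift, yet it stays in the binary. */
            printf("inspect example_hash_shift with a debugger\n");
            return 0;
    }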
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 5367caf..4da0a49 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -313,6 +313,7 @@
netfs_reset_subreq_iter(rreq, subreq);
netfs_read_from_server(rreq, subreq);
} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
+ netfs_reset_subreq_iter(rreq, subreq);
netfs_rreq_short_read(rreq, subreq);
}
}
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 83e644b..c1f321c 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -97,10 +97,22 @@
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
struct netfs_folio *finfo;
+ struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
size_t flen = folio_size(folio);
_enter("{%lx},%zx,%zx", folio->index, offset, length);
+ if (offset == 0 && length == flen) {
+ unsigned long long i_size = i_size_read(&ctx->inode);
+ unsigned long long fpos = folio_pos(folio), end;
+
+ end = umin(fpos + flen, i_size);
+ if (fpos < i_size && end > ctx->zero_point)
+ ctx->zero_point = end;
+ }
+
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+
if (!folio_test_private(folio))
return;
@@ -113,18 +125,34 @@
/* We have a partially uptodate page from a streaming write. */
unsigned int fstart = finfo->dirty_offset;
unsigned int fend = fstart + finfo->dirty_len;
- unsigned int end = offset + length;
+ unsigned int iend = offset + length;
if (offset >= fend)
return;
- if (end <= fstart)
+ if (iend <= fstart)
return;
- if (offset <= fstart && end >= fend)
- goto erase_completely;
- if (offset <= fstart && end > fstart)
- goto reduce_len;
- if (offset > fstart && end >= fend)
- goto move_start;
+
+ /* The invalidation region overlaps the data. If the region
+ * covers the start of the data, we either move the start of the
+ * data along or just erase the data entirely.
+ */
+ if (offset <= fstart) {
+ if (iend >= fend)
+ goto erase_completely;
+ /* Move the start of the data. */
+ finfo->dirty_len = fend - iend;
+ finfo->dirty_offset = offset;
+ return;
+ }
+
+ /* Reduce the length of the data if the invalidation region
+ * covers the tail part.
+ */
+ if (iend >= fend) {
+ finfo->dirty_len = offset - fstart;
+ return;
+ }
+
/* A partial write was split. The caller has already zeroed
* it, so just absorb the hole.
*/
@@ -137,12 +165,6 @@
folio_clear_uptodate(folio);
kfree(finfo);
return;
-reduce_len:
- finfo->dirty_len = offset + length - finfo->dirty_offset;
- return;
-move_start:
- finfo->dirty_len -= offset - finfo->dirty_offset;
- finfo->dirty_offset = offset;
}
EXPORT_SYMBOL(netfs_invalidate_folio);
@@ -159,12 +181,20 @@
struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
unsigned long long end;
- end = folio_pos(folio) + folio_size(folio);
+ if (folio_test_dirty(folio))
+ return false;
+
+ end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode));
if (end > ctx->zero_point)
ctx->zero_point = end;
if (folio_test_private(folio))
return false;
+ if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ folio_wait_private_2(folio);
+ }
fscache_note_page_release(netfs_i_cookie(ctx));
return true;
}
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 426cf87..ae7a204 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -33,6 +33,7 @@
int netfs_folio_written_back(struct folio *folio)
{
enum netfs_folio_trace why = netfs_folio_trace_clear;
+ struct netfs_inode *ictx = netfs_inode(folio->mapping->host);
struct netfs_folio *finfo;
struct netfs_group *group = NULL;
int gcount = 0;
@@ -41,6 +42,12 @@
/* Streaming writes cannot be redirtied whilst under writeback,
* so discard the streaming record.
*/
+ unsigned long long fend;
+
+ fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len;
+ if (fend > ictx->zero_point)
+ ictx->zero_point = fend;
+
folio_detach_private(folio);
group = finfo->netfs_group;
gcount++;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a20c2c9..a366fb1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2789,15 +2789,18 @@
deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
- spin_lock(&nf->fi_lock);
- file = find_any_file_locked(nf);
- if (file) {
- nfs4_show_superblock(s, file);
- seq_puts(s, ", ");
- nfs4_show_fname(s, file);
- seq_puts(s, ", ");
- }
- spin_unlock(&nf->fi_lock);
+ if (nf) {
+ spin_lock(&nf->fi_lock);
+ file = find_any_file_locked(nf);
+ if (file) {
+ nfs4_show_superblock(s, file);
+ seq_puts(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_puts(s, ", ");
+ }
+ spin_unlock(&nf->fi_lock);
+ } else
+ seq_puts(s, "closed, ");
nfs4_show_owner(s, oo);
if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
seq_puts(s, ", admin-revoked");
@@ -3075,9 +3078,9 @@
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- nfs4_put_stid(&dp->dl_stid);
clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
+ nfs4_put_stid(&dp->dl_stid);
}
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
@@ -8812,7 +8815,7 @@
/**
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
- * @inode: file to be checked for a conflict
+ * @dentry: dentry of inode to be checked for a conflict
* @modified: return true if file was modified
* @size: new size of file if modified is true
*
@@ -8827,16 +8830,16 @@
* code is returned.
*/
__be32
-nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
bool *modified, u64 *size)
{
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct file_lock_context *ctx;
struct file_lease *fl;
- struct nfs4_delegation *dp;
struct iattr attrs;
struct nfs4_cb_fattr *ncf;
+ struct inode *inode = d_inode(dentry);
*modified = false;
ctx = locks_inode_context(inode);
@@ -8856,17 +8859,26 @@
*/
if (type == F_RDLCK)
break;
- goto break_lease;
+
+ nfsd_stats_wdeleg_getattr_inc(nn);
+ spin_unlock(&ctx->flc_lock);
+
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ return status;
+ return 0;
}
if (type == F_WRLCK) {
- dp = fl->c.flc_owner;
+ struct nfs4_delegation *dp = fl->c.flc_owner;
+
if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
spin_unlock(&ctx->flc_lock);
return 0;
}
-break_lease:
nfsd_stats_wdeleg_getattr_inc(nn);
dp = fl->c.flc_owner;
+ refcount_inc(&dp->dl_stid.sc_count);
ncf = &dp->dl_cb_fattr;
nfs4_cb_getattr(&dp->dl_cb_fattr);
spin_unlock(&ctx->flc_lock);
@@ -8876,27 +8888,37 @@
/* Recall delegation only if client didn't respond */
status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
if (status != nfserr_jukebox ||
- !nfsd_wait_for_delegreturn(rqstp, inode))
+ !nfsd_wait_for_delegreturn(rqstp, inode)) {
+ nfs4_put_stid(&dp->dl_stid);
return status;
+ }
}
if (!ncf->ncf_file_modified &&
(ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
ncf->ncf_file_modified = true;
if (ncf->ncf_file_modified) {
+ int err;
+
/*
* Per section 10.4.3 of RFC 8881, the server would
* not update the file's metadata with the client's
* modified size
*/
attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
- attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
- setattr_copy(&nop_mnt_idmap, inode, &attrs);
- mark_inode_dirty(inode);
+ attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG;
+ inode_lock(inode);
+ err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ if (err) {
+ nfs4_put_stid(&dp->dl_stid);
+ return nfserrno(err);
+ }
ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
*size = ncf->ncf_cur_fsize;
*modified = true;
}
+ nfs4_put_stid(&dp->dl_stid);
return 0;
}
break;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 42b41d5..97f5837 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3545,6 +3545,9 @@
args.dentry = dentry;
args.ignore_crossmnt = (ignore_crossmnt != 0);
args.acl = NULL;
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ args.context = NULL;
+#endif
/*
* Make a local copy of the attribute bitmap that can be modified.
@@ -3562,7 +3565,7 @@
}
args.size = 0;
if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
- status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
+ status = nfsd4_deleg_getattr_conflict(rqstp, dentry,
&file_modified, &size);
if (status)
goto out;
@@ -3617,7 +3620,6 @@
args.contextsupport = false;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- args.context = NULL;
if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index ffc2170..ec4559e 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -781,5 +781,5 @@
}
extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
- struct inode *inode, bool *file_modified, u64 *size);
+ struct dentry *dentry, bool *file_modified, u64 *size);
#endif /* NFSD4_STATE_H */
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index 4860fcc..d0568c0 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -353,6 +353,8 @@
case Opt_datadir_add:
ctx->nr_data++;
fallthrough;
+ case Opt_lowerdir:
+ fallthrough;
case Opt_lowerdir_add:
WARN_ON(ctx->nr >= ctx->capacity);
l = &ctx->lower[ctx->nr++];
@@ -365,10 +367,9 @@
}
}
-static int ovl_parse_layer(struct fs_context *fc, struct fs_parameter *param,
- enum ovl_opt layer)
+static int ovl_parse_layer(struct fs_context *fc, const char *layer_name, enum ovl_opt layer)
{
- char *name = kstrdup(param->string, GFP_KERNEL);
+ char *name = kstrdup(layer_name, GFP_KERNEL);
bool upper = (layer == Opt_upperdir || layer == Opt_workdir);
struct path path;
int err;
@@ -376,7 +377,7 @@
if (!name)
return -ENOMEM;
- if (upper)
+ if (upper || layer == Opt_lowerdir)
err = ovl_mount_dir(name, &path);
else
err = ovl_mount_dir_noesc(name, &path);
@@ -432,7 +433,6 @@
{
int err;
struct ovl_fs_context *ctx = fc->fs_private;
- struct ovl_fs_context_layer *l;
char *dup = NULL, *iter;
ssize_t nr_lower, nr;
bool data_layer = false;
@@ -449,7 +449,7 @@
return 0;
if (*name == ':') {
- pr_err("cannot append lower layer");
+ pr_err("cannot append lower layer\n");
return -EINVAL;
}
@@ -472,35 +472,11 @@
goto out_err;
}
- if (nr_lower > ctx->capacity) {
- err = -ENOMEM;
- l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower),
- GFP_KERNEL_ACCOUNT);
- if (!l)
- goto out_err;
-
- ctx->lower = l;
- ctx->capacity = nr_lower;
- }
-
iter = dup;
- l = ctx->lower;
- for (nr = 0; nr < nr_lower; nr++, l++) {
- ctx->nr++;
- memset(l, 0, sizeof(*l));
-
- err = ovl_mount_dir(iter, &l->path);
+ for (nr = 0; nr < nr_lower; nr++) {
+ err = ovl_parse_layer(fc, iter, Opt_lowerdir);
if (err)
- goto out_put;
-
- err = ovl_mount_dir_check(fc, &l->path, Opt_lowerdir, iter, false);
- if (err)
- goto out_put;
-
- err = -ENOMEM;
- l->name = kstrdup(iter, GFP_KERNEL_ACCOUNT);
- if (!l->name)
- goto out_put;
+ goto out_err;
if (data_layer)
ctx->nr_data++;
@@ -517,8 +493,8 @@
* there are no data layers.
*/
if (ctx->nr_data > 0) {
- pr_err("regular lower layers cannot follow data lower layers");
- goto out_put;
+ pr_err("regular lower layers cannot follow data lower layers\n");
+ goto out_err;
}
data_layer = false;
@@ -532,9 +508,6 @@
kfree(dup);
return 0;
-out_put:
- ovl_reset_lowerdirs(ctx);
-
out_err:
kfree(dup);
@@ -582,7 +555,7 @@
case Opt_datadir_add:
case Opt_upperdir:
case Opt_workdir:
- err = ovl_parse_layer(fc, param, opt);
+ err = ovl_parse_layer(fc, param->string, opt);
break;
case Opt_default_permissions:
config->default_permissions = true;
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 68758b6..0addcc8 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -126,7 +126,7 @@
}
}
- buf = folio_zero_tail(folio, fillsize, buf);
+ buf = folio_zero_tail(folio, fillsize, buf + fillsize);
kunmap_local(buf);
folio_end_read(folio, ret == 0);
return ret;
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 7ebe80a..f379b9d 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -254,7 +254,6 @@
struct smb_rqst {
struct kvec *rq_iov; /* array of kvecs */
unsigned int rq_nvec; /* number of kvecs in array */
- size_t rq_iter_size; /* Amount of data in ->rq_iter */
struct iov_iter rq_iter; /* Data iterator */
struct xarray rq_buffer; /* Page buffer for encryption */
};
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 595c4b67..6dce70f 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1713,7 +1713,6 @@
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
rqst.rq_iter = wdata->subreq.io_iter;
- rqst.rq_iter_size = iov_iter_count(&wdata->subreq.io_iter);
cifs_dbg(FYI, "async write at %llu %zu bytes\n",
wdata->subreq.start, wdata->subreq.len);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 322cabc..0b9cb1a 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -3305,6 +3305,7 @@
struct inode *inode = file_inode(file);
struct cifsFileInfo *cfile = file->private_data;
struct file_zero_data_information fsctl_buf;
+ unsigned long long end = offset + len, i_size, remote_i_size;
long rc;
unsigned int xid;
__u8 set_sparse = 1;
@@ -3336,6 +3337,27 @@
(char *)&fsctl_buf,
sizeof(struct file_zero_data_information),
CIFSMaxBufSize, NULL, NULL);
+
+ if (rc)
+ goto unlock;
+
+ /* If there's dirty data in the buffer that would extend the EOF if it
+ * were written, then we need to move the EOF marker over to the lower
+ * of the high end of the hole and the proposed EOF. The problem is
+ * that, since we locally hole-punch the tail of the dirty data, the
+ * proposed EOF update will end up in the wrong place.
+ */
+ i_size = i_size_read(inode);
+ remote_i_size = netfs_inode(inode)->remote_i_size;
+ if (end > remote_i_size && i_size > remote_i_size) {
+ unsigned long long extend_to = umin(end, i_size);
+ rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, cfile->pid, extend_to);
+ if (rc >= 0)
+ netfs_inode(inode)->remote_i_size = extend_to;
+ }
+
+unlock:
filemap_invalidate_unlock(inode->i_mapping);
out:
inode_unlock(inode);
@@ -4446,7 +4468,6 @@
}
iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
buffer, 0, size);
- new->rq_iter_size = size;
}
}
@@ -4492,7 +4513,6 @@
rqst.rq_nvec = 2;
if (iter) {
rqst.rq_iter = *iter;
- rqst.rq_iter_size = iov_iter_count(iter);
iter_size = iov_iter_count(iter);
}
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 83facb5..2d7e6c4 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4441,7 +4441,7 @@
* If we want to do a RDMA write, fill in and append
* smbd_buffer_descriptor_v1 to the end of read request
*/
- if (smb3_use_rdma_offload(io_parms)) {
+ if (rdata && smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
@@ -4523,7 +4523,6 @@
if (rdata->got_bytes) {
rqst.rq_iter = rdata->subreq.io_iter;
- rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter);
}
WARN_ONCE(rdata->server != mid->server,
@@ -4914,6 +4913,13 @@
if (rc)
goto out;
+ rqst.rq_iov = iov;
+ rqst.rq_iter = wdata->subreq.io_iter;
+
+ rqst.rq_iov[0].iov_len = total_len - 1;
+ rqst.rq_iov[0].iov_base = (char *)req;
+ rqst.rq_nvec += 1;
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -4925,6 +4931,7 @@
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = SMB2_CHANNEL_NONE;
+ req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer));
@@ -4944,7 +4951,6 @@
*/
if (smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
- size_t data_size = iov_iter_count(&wdata->subreq.io_iter);
bool need_invalidate = server->dialect == SMB30_PROT_ID;
wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
@@ -4953,9 +4959,10 @@
rc = -EAGAIN;
goto async_writev_out;
}
+ /* For RDMA read, I/O size is in RemainingBytes not in Length */
+ req->RemainingBytes = req->Length;
req->Length = 0;
req->DataOffset = 0;
- req->RemainingBytes = cpu_to_le32(data_size);
req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
if (need_invalidate)
req->Channel = SMB2_CHANNEL_RDMA_V1;
@@ -4967,31 +4974,22 @@
v1->offset = cpu_to_le64(wdata->mr->mr->iova);
v1->token = cpu_to_le32(wdata->mr->mr->rkey);
v1->length = cpu_to_le32(wdata->mr->mr->length);
+
+ rqst.rq_iov[0].iov_len += sizeof(*v1);
+
+ /*
+ * We keep wdata->subreq.io_iter,
+ * but we have to truncate rqst.rq_iter
+ */
+ iov_iter_truncate(&rqst.rq_iter, 0);
}
#endif
- iov[0].iov_len = total_len - 1;
- iov[0].iov_base = (char *)req;
- rqst.rq_iov = iov;
- rqst.rq_nvec = 1;
- rqst.rq_iter = wdata->subreq.io_iter;
- rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
smb2_set_replay(server, &rqst);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (wdata->mr)
- iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
-#endif
- cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
- io_parms->offset, io_parms->length, iov_iter_count(&rqst.rq_iter));
-#ifdef CONFIG_CIFS_SMB_DIRECT
- /* For RDMA read, I/O size is in RemainingBytes not in Length */
- if (!wdata->mr)
- req->Length = cpu_to_le32(io_parms->length);
-#else
- req->Length = cpu_to_le32(io_parms->length);
-#endif
+ cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
+ io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
if (wdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 496e2f7..797d5b5 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -749,7 +749,7 @@
if (error)
return error;
- cur = xfs_inobt_init_cursor(pag, tp, agbp);
+ cur = xfs_finobt_init_cursor(pag, tp, agbp);
error = xfs_btree_count_blocks(cur, tree_blocks);
xfs_btree_del_cursor(cur, error);
xfs_trans_brelse(tp, agbp);
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 513b50d..79babea 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -514,12 +514,18 @@
return __this_address;
}
- if (dip->di_version > 1) {
+ /*
+ * Historical note: xfsprogs in the 3.2 era set up its incore inodes to
+ * have di_nlink track the link count, even if the actual filesystem
+ * only supported V1 inodes (i.e. di_onlink). When writing out the
+ * ondisk inode, it would set both the ondisk di_nlink and di_onlink to
+ * the incore di_nlink value, which is why we cannot check for
+ * di_nlink==0 on a V1 inode. V2/3 inodes would get written out with
+ * di_onlink==0, so we can check that.
+ */
+ if (dip->di_version >= 2) {
if (dip->di_onlink)
return __this_address;
- } else {
- if (dip->di_nlink)
- return __this_address;
}
/* don't allow invalid i_size */
diff --git a/fs/xfs/scrub/xfile.c b/fs/xfs/scrub/xfile.c
index d848222..9b5d98f 100644
--- a/fs/xfs/scrub/xfile.c
+++ b/fs/xfs/scrub/xfile.c
@@ -293,7 +293,7 @@
* (potentially last) reference in xfile_put_folio.
*/
if (flags & XFILE_ALLOC)
- folio_set_dirty(folio);
+ folio_mark_dirty(folio);
return folio;
}
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 6f0fc7f..25f5dff 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -158,8 +158,7 @@
xfs_trim_gather_extents(
struct xfs_perag *pag,
struct xfs_trim_cur *tcur,
- struct xfs_busy_extents *extents,
- uint64_t *blocks_trimmed)
+ struct xfs_busy_extents *extents)
{
struct xfs_mount *mp = pag->pag_mount;
struct xfs_trans *tp;
@@ -280,7 +279,6 @@
xfs_extent_busy_insert_discard(pag, fbno, flen,
&extents->extent_list);
- *blocks_trimmed += flen;
next_extent:
if (tcur->by_bno)
error = xfs_btree_increment(cur, 0, &i);
@@ -327,8 +325,7 @@
struct xfs_perag *pag,
xfs_agblock_t start,
xfs_agblock_t end,
- xfs_extlen_t minlen,
- uint64_t *blocks_trimmed)
+ xfs_extlen_t minlen)
{
struct xfs_trim_cur tcur = {
.start = start,
@@ -354,8 +351,7 @@
extents->owner = extents;
INIT_LIST_HEAD(&extents->extent_list);
- error = xfs_trim_gather_extents(pag, &tcur, extents,
- blocks_trimmed);
+ error = xfs_trim_gather_extents(pag, &tcur, extents);
if (error) {
kfree(extents);
break;
@@ -389,8 +385,7 @@
struct xfs_mount *mp,
xfs_daddr_t start,
xfs_daddr_t end,
- xfs_extlen_t minlen,
- uint64_t *blocks_trimmed)
+ xfs_extlen_t minlen)
{
xfs_agnumber_t start_agno, end_agno;
xfs_agblock_t start_agbno, end_agbno;
@@ -411,8 +406,7 @@
if (start_agno == end_agno)
agend = end_agbno;
- error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen,
- blocks_trimmed);
+ error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen);
if (error)
last_error = error;
@@ -431,9 +425,6 @@
/* list of rt extents to free */
struct list_head extent_list;
- /* pointer to count of blocks trimmed */
- uint64_t *blocks_trimmed;
-
/* minimum length that caller allows us to trim */
xfs_rtblock_t minlen_fsb;
@@ -551,7 +542,6 @@
busyp->length = rlen;
INIT_LIST_HEAD(&busyp->list);
list_add_tail(&busyp->list, &tr->extent_list);
- *tr->blocks_trimmed += rlen;
tr->restart_rtx = rec->ar_startext + rec->ar_extcount;
return 0;
@@ -562,13 +552,11 @@
struct xfs_mount *mp,
xfs_daddr_t start,
xfs_daddr_t end,
- xfs_daddr_t minlen,
- uint64_t *blocks_trimmed)
+ xfs_daddr_t minlen)
{
struct xfs_rtalloc_rec low = { };
struct xfs_rtalloc_rec high = { };
struct xfs_trim_rtdev tr = {
- .blocks_trimmed = blocks_trimmed,
.minlen_fsb = XFS_BB_TO_FSB(mp, minlen),
};
struct xfs_trans *tp;
@@ -634,7 +622,7 @@
return error;
}
#else
-# define xfs_trim_rtdev_extents(m,s,e,n,b) (-EOPNOTSUPP)
+# define xfs_trim_rtdev_extents(...) (-EOPNOTSUPP)
#endif /* CONFIG_XFS_RT */
/*
@@ -661,7 +649,6 @@
xfs_daddr_t start, end;
xfs_extlen_t minlen;
xfs_rfsblock_t max_blocks;
- uint64_t blocks_trimmed = 0;
int error, last_error = 0;
if (!capable(CAP_SYS_ADMIN))
@@ -706,15 +693,13 @@
end = start + BTOBBT(range.len) - 1;
if (bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev)) {
- error = xfs_trim_datadev_extents(mp, start, end, minlen,
- &blocks_trimmed);
+ error = xfs_trim_datadev_extents(mp, start, end, minlen);
if (error)
last_error = error;
}
if (rt_bdev && !xfs_trim_should_stop()) {
- error = xfs_trim_rtdev_extents(mp, start, end, minlen,
- &blocks_trimmed);
+ error = xfs_trim_rtdev_extents(mp, start, end, minlen);
if (error)
last_error = error;
}
@@ -722,7 +707,8 @@
if (last_error)
return last_error;
- range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
+ range.len = min_t(unsigned long long, range.len,
+ XFS_FSB_TO_B(mp, max_blocks));
if (copy_to_user(urange, &range, sizeof(range)))
return -EFAULT;
return 0;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 85dbb464..71f3235 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -71,7 +71,7 @@
switch (src->fmr_owner) {
case 0: /* "lowest owner id possible" */
case -1ULL: /* "highest owner id possible" */
- dest->rm_owner = 0;
+ dest->rm_owner = src->fmr_owner;
break;
case XFS_FMR_OWN_FREE:
dest->rm_owner = XFS_RMAP_OWN_NULL;
@@ -162,6 +162,7 @@
xfs_daddr_t next_daddr; /* next daddr we expect */
/* daddr of low fsmap key when we're using the rtbitmap */
xfs_daddr_t low_daddr;
+ xfs_daddr_t end_daddr; /* daddr of high fsmap key */
u64 missing_owner; /* owner of holes */
u32 dev; /* device id */
/*
@@ -182,6 +183,7 @@
int (*fn)(struct xfs_trans *tp,
const struct xfs_fsmap *keys,
struct xfs_getfsmap_info *info);
+ sector_t nr_sectors;
};
/* Compare two getfsmap device handlers. */
@@ -252,7 +254,7 @@
const struct xfs_rmap_irec *rec,
xfs_daddr_t rec_daddr)
{
- if (info->low_daddr != -1ULL)
+ if (info->low_daddr != XFS_BUF_DADDR_NULL)
return rec_daddr < info->low_daddr;
if (info->low.rm_blockcount)
return xfs_rmap_compare(rec, &info->low) < 0;
@@ -294,6 +296,18 @@
return 0;
}
+ /*
+ * For an info->last query, we're looking for a gap between the last
+ * mapping emitted and the high key specified by userspace. If the
+ * user's query spans less than 1 fsblock, then info->high and
+ * info->low will have the same rm_startblock, which causes rec_daddr
+ * and next_daddr to be the same. Therefore, use the end_daddr that
+ * we calculated from userspace's high key to synthesize the record.
+ * Note that if the btree query found a mapping, there won't be a gap.
+ */
+ if (info->last && info->end_daddr != XFS_BUF_DADDR_NULL)
+ rec_daddr = info->end_daddr;
+
/* Are we just counting mappings? */
if (info->head->fmh_count == 0) {
if (info->head->fmh_entries == UINT_MAX)
@@ -904,17 +918,21 @@
/* Set up our device handlers. */
memset(handlers, 0, sizeof(handlers));
+ handlers[0].nr_sectors = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev);
if (use_rmap)
handlers[0].fn = xfs_getfsmap_datadev_rmapbt;
else
handlers[0].fn = xfs_getfsmap_datadev_bnobt;
if (mp->m_logdev_targp != mp->m_ddev_targp) {
+ handlers[1].nr_sectors = XFS_FSB_TO_BB(mp,
+ mp->m_sb.sb_logblocks);
handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
handlers[1].fn = xfs_getfsmap_logdev;
}
#ifdef CONFIG_XFS_RT
if (mp->m_rtdev_targp) {
+ handlers[2].nr_sectors = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
}
@@ -946,6 +964,7 @@
info.next_daddr = head->fmh_keys[0].fmr_physical +
head->fmh_keys[0].fmr_length;
+ info.end_daddr = XFS_BUF_DADDR_NULL;
info.fsmap_recs = fsmap_recs;
info.head = head;
@@ -966,8 +985,11 @@
* low key, zero out the low key so that we get
* everything from the beginning.
*/
- if (handlers[i].dev == head->fmh_keys[1].fmr_device)
+ if (handlers[i].dev == head->fmh_keys[1].fmr_device) {
dkeys[1] = head->fmh_keys[1];
+ info.end_daddr = min(handlers[i].nr_sectors - 1,
+ dkeys[1].fmr_physical);
+ }
if (handlers[i].dev > head->fmh_keys[0].fmr_device)
memset(&dkeys[0], 0, sizeof(struct xfs_fsmap));
@@ -983,7 +1005,7 @@
info.dev = handlers[i].dev;
info.last = false;
info.pag = NULL;
- info.low_daddr = -1ULL;
+ info.low_daddr = XFS_BUF_DADDR_NULL;
info.low.rm_blockcount = 0;
error = handlers[i].fn(tp, dkeys, &info);
if (error)
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 0c3e96c..ebeab8e 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -785,6 +785,39 @@
}
/*
+ * If we changed the rt extent size (meaning there was no rt volume previously)
+ * and the root directory had EXTSZINHERIT and RTINHERIT set, it's possible
+ * that the extent size hint on the root directory is no longer congruent with
+ * the new rt extent size. Log the rootdir inode to fix this.
+ */
+static int
+xfs_growfs_rt_fixup_extsize(
+ struct xfs_mount *mp)
+{
+ struct xfs_inode *ip = mp->m_rootip;
+ struct xfs_trans *tp;
+ int error = 0;
+
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ if (!(ip->i_diflags & XFS_DIFLAG_RTINHERIT) ||
+ !(ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT))
+ goto out_iolock;
+
+ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_ichange, 0, 0, false,
+ &tp);
+ if (error)
+ goto out_iolock;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ error = xfs_trans_commit(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+out_iolock:
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return error;
+}
+
+/*
* Visible (exported) functions.
*/
@@ -812,6 +845,7 @@
xfs_extlen_t rsumblocks; /* current number of rt summary blks */
xfs_sb_t *sbp; /* old superblock */
uint8_t *rsum_cache; /* old summary cache */
+ xfs_agblock_t old_rextsize = mp->m_sb.sb_rextsize;
sbp = &mp->m_sb;
@@ -821,34 +855,39 @@
/* Needs to have been mounted with an rt device. */
if (!XFS_IS_REALTIME_MOUNT(mp))
return -EINVAL;
+
+ if (!mutex_trylock(&mp->m_growlock))
+ return -EWOULDBLOCK;
/*
* Mount should fail if the rt bitmap/summary files don't load, but
* we'll check anyway.
*/
+ error = -EINVAL;
if (!mp->m_rbmip || !mp->m_rsumip)
- return -EINVAL;
+ goto out_unlock;
/* Shrink not supported. */
if (in->newblocks <= sbp->sb_rblocks)
- return -EINVAL;
+ goto out_unlock;
/* Can only change rt extent size when adding rt volume. */
if (sbp->sb_rblocks > 0 && in->extsize != sbp->sb_rextsize)
- return -EINVAL;
+ goto out_unlock;
/* Range check the extent size. */
if (XFS_FSB_TO_B(mp, in->extsize) > XFS_MAX_RTEXTSIZE ||
XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE)
- return -EINVAL;
+ goto out_unlock;
/* Unsupported realtime features. */
+ error = -EOPNOTSUPP;
if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp) || xfs_has_quota(mp))
- return -EOPNOTSUPP;
+ goto out_unlock;
nrblocks = in->newblocks;
error = xfs_sb_validate_fsb_count(sbp, nrblocks);
if (error)
- return error;
+ goto out_unlock;
/*
* Read in the last block of the device, make sure it exists.
*/
@@ -856,7 +895,7 @@
XFS_FSB_TO_BB(mp, nrblocks - 1),
XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
if (error)
- return error;
+ goto out_unlock;
xfs_buf_relse(bp);
/*
@@ -864,8 +903,10 @@
*/
nrextents = nrblocks;
do_div(nrextents, in->extsize);
- if (!xfs_validate_rtextents(nrextents))
- return -EINVAL;
+ if (!xfs_validate_rtextents(nrextents)) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
nrbmblocks = xfs_rtbitmap_blockcount(mp, nrextents);
nrextslog = xfs_compute_rextslog(nrextents);
nrsumlevels = nrextslog + 1;
@@ -876,8 +917,11 @@
* the log. This prevents us from getting a log overflow,
* since we'll log basically the whole summary file at once.
*/
- if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1))
- return -EINVAL;
+ if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* Get the old block counts for bitmap and summary inodes.
* These can't change since other growfs callers are locked out.
@@ -889,10 +933,10 @@
*/
error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, mp->m_rbmip);
if (error)
- return error;
+ goto out_unlock;
error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip);
if (error)
- return error;
+ goto out_unlock;
rsum_cache = mp->m_rsum_cache;
if (nrbmblocks != sbp->sb_rbmblocks)
@@ -1036,6 +1080,12 @@
if (error)
goto out_free;
+ if (old_rextsize != in->extsize) {
+ error = xfs_growfs_rt_fixup_extsize(mp);
+ if (error)
+ goto out_free;
+ }
+
/* Update secondary superblocks now the physical grow has completed */
error = xfs_update_secondary_sbs(mp);
@@ -1059,6 +1109,8 @@
}
}
+out_unlock:
+ mutex_unlock(&mp->m_growlock);
return error;
}
diff --git a/include/drm/intel/i915_pciids.h b/include/drm/intel/i915_pciids.h
index b21374f..2bf03eb 100644
--- a/include/drm/intel/i915_pciids.h
+++ b/include/drm/intel/i915_pciids.h
@@ -772,15 +772,18 @@
INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
/* MTL */
-#define INTEL_MTL_IDS(MACRO__, ...) \
- MACRO__(0x7D40, ## __VA_ARGS__), \
+#define INTEL_ARL_IDS(MACRO__, ...) \
MACRO__(0x7D41, ## __VA_ARGS__), \
- MACRO__(0x7D45, ## __VA_ARGS__), \
MACRO__(0x7D51, ## __VA_ARGS__), \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0x7DD1, ## __VA_ARGS__)
+
+#define INTEL_MTL_IDS(MACRO__, ...) \
+ INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__), \
MACRO__(0x7D55, ## __VA_ARGS__), \
MACRO__(0x7D60, ## __VA_ARGS__), \
- MACRO__(0x7D67, ## __VA_ARGS__), \
- MACRO__(0x7DD1, ## __VA_ARGS__), \
MACRO__(0x7DD5, ## __VA_ARGS__)
/* LNL */
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index ef0f52f..6ccf96c 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -39,11 +39,7 @@
#include "ttm_device.h"
/* Default number of pre-faulted pages in the TTM fault handler */
-#if CONFIG_PGTABLE_LEVELS > 2
-#define TTM_BO_VM_NUM_PREFAULT (1 << (PMD_SHIFT - PAGE_SHIFT))
-#else
#define TTM_BO_VM_NUM_PREFAULT 16
-#endif
struct iosys_map;
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 1dc5b3b..3387897b 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -26,51 +26,6 @@
};
/**
- * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
- * @client: The QSEECOM client device.
- *
- * Returns the SCM device under which the provided QSEECOM client device
- * operates. This function is intended to be used for DMA allocations.
- */
-static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
-{
- return client->aux_dev.dev.parent->parent;
-}
-
-/**
- * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
- * @client: The QSEECOM client to allocate the memory for.
- * @size: The number of bytes to allocate.
- * @dma_handle: Pointer to where the DMA address should be stored.
- * @gfp: Allocation flags.
- *
- * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
- * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
- */
-static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
-}
-
-/**
- * dma_free_coherent() - Free QSEECOM DMA memory.
- * @client: The QSEECOM client for which the memory has been allocated.
- * @size: The number of bytes allocated.
- * @cpu_addr: Virtual memory address to free.
- * @dma_handle: DMA memory address to free.
- *
- * Wrapper function for dma_free_coherent(), freeing memory previously
- * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
- * details.
- */
-static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
-}
-
-/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
* @req: Request buffer sent to the app (must be TZ memory).
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fb0426f..6ca11e2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -210,6 +210,7 @@
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
#define ATTR_TOUCH (1 << 17)
+#define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */
/*
* Whiteout is represented by a char device. The following constants define the
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 04cbdae..bd722f4 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1563,7 +1563,7 @@
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
-void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
enum iommu_page_response_code status);
#else
@@ -1601,9 +1601,10 @@
{
}
-static inline void
+static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
+ return -ENODEV;
}
static inline void iopf_group_response(struct iopf_group *group,
diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
index fd124aa..7cddf10 100644
--- a/include/linux/soc/qcom/pmic_glink.h
+++ b/include/linux/soc/qcom/pmic_glink.h
@@ -23,10 +23,11 @@
int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
- unsigned int id,
- void (*cb)(const void *, size_t, void *),
- void (*pdr)(void *, int),
- void *priv);
+struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv);
+void pmic_glink_client_register(struct pmic_glink_client *client);
#endif
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index c9cb657..bef5f06 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -58,11 +58,11 @@
#ifdef CONFIG_SYSFB
-void sysfb_disable(void);
+void sysfb_disable(struct device *dev);
#else /* CONFIG_SYSFB */
-static inline void sysfb_disable(void)
+static inline void sysfb_disable(struct device *dev)
{
}
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b61fb1a..8bb5f01 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -260,7 +260,7 @@
#ifdef CONFIG_XFRM_OFFLOAD
struct list_head ipsec_list;
/* protecting ipsec_list */
- spinlock_t ipsec_lock;
+ struct mutex ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
};
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 9b09aca..522f1da 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -68,7 +68,7 @@
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- return (unsigned long)(local_clock() >> 10);
+ return (unsigned long)(ktime_get_ns() >> 10);
#else
return 0;
#endif
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 60a7d0c..fcf9672 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -19,7 +19,7 @@
static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
struct iphdr *iph, _iph;
- u32 len, thoff;
+ u32 len, thoff, skb_len;
iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
sizeof(*iph), &_iph);
@@ -30,8 +30,10 @@
return -1;
len = iph_totlen(pkt->skb, iph);
- thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
- if (pkt->skb->len < len)
+ thoff = iph->ihl * 4;
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+
+ if (skb_len < len)
return -1;
else if (len < thoff)
return -1;
@@ -40,7 +42,7 @@
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
- pkt->thoff = thoff;
+ pkt->thoff = skb_network_offset(pkt->skb) + thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 467d59b..a0633ee 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -31,8 +31,8 @@
struct ipv6hdr *ip6h, _ip6h;
unsigned int thoff = 0;
unsigned short frag_off;
+ u32 pkt_len, skb_len;
int protohdr;
- u32 pkt_len;
ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
sizeof(*ip6h), &_ip6h);
@@ -43,7 +43,8 @@
return -1;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+ if (pkt_len + sizeof(*ip6h) > skb_len)
return -1;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 1af2bd5..bdfa30b 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -129,7 +129,7 @@
iov[0].iov_base = buf;
iov[0].iov_len = *len;
- return 0;
+ return 1;
}
static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br,
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index a860516..453867a 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -394,10 +394,11 @@
struct io_uring_rsrc_update2 *up,
unsigned int nr_args)
{
- struct iovec __user *uvec = u64_to_user_ptr(up->data);
u64 __user *tags = u64_to_user_ptr(up->tags);
struct iovec fast_iov, *iov;
struct page *last_hpage = NULL;
+ struct iovec __user *uvec;
+ u64 user_data = up->data;
__u32 done;
int i, err;
@@ -410,7 +411,8 @@
struct io_mapped_ubuf *imu;
u64 tag = 0;
- iov = iovec_from_user(&uvec[done], 1, 1, &fast_iov, ctx->compat);
+ uvec = u64_to_user_ptr(user_data);
+ iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
if (IS_ERR(iov)) {
err = PTR_ERR(iov);
break;
@@ -443,6 +445,10 @@
ctx->user_bufs[i] = imu;
*io_get_tag_slot(ctx->buf_data, i) = tag;
+ if (ctx->compat)
+ user_data += sizeof(struct compat_iovec);
+ else
+ user_data += sizeof(struct iovec);
}
return done ? done : err;
}
@@ -949,7 +955,7 @@
struct page *last_hpage = NULL;
struct io_rsrc_data *data;
struct iovec fast_iov, *iov = &fast_iov;
- const struct iovec __user *uvec = (struct iovec * __user) arg;
+ const struct iovec __user *uvec;
int i, ret;
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
@@ -972,7 +978,8 @@
for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
if (arg) {
- iov = iovec_from_user(&uvec[i], 1, 1, &fast_iov, ctx->compat);
+ uvec = (struct iovec __user *) arg;
+ iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
if (IS_ERR(iov)) {
ret = PTR_ERR(iov);
break;
@@ -980,6 +987,10 @@
ret = io_buffer_validate(iov);
if (ret)
break;
+ if (ctx->compat)
+ arg += sizeof(struct compat_iovec);
+ else
+ arg += sizeof(struct iovec);
}
if (!iov->iov_base && *io_get_tag_slot(data, i)) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 18bdc87..cc76049 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2053,24 +2053,11 @@
*/
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{
- if (!pid)
+ bool thread = flags & PIDFD_THREAD;
+
+ if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
return -EINVAL;
- scoped_guard(rcu) {
- struct task_struct *tsk;
-
- if (flags & PIDFD_THREAD)
- tsk = pid_task(pid, PIDTYPE_PID);
- else
- tsk = pid_task(pid, PIDTYPE_TGID);
- if (!tsk)
- return -EINVAL;
-
- /* Don't create pidfds for kernel threads for now. */
- if (tsk->flags & PF_KTHREAD)
- return -EINVAL;
- }
-
return __pidfd_prepare(pid, flags, ret);
}
@@ -2416,12 +2403,6 @@
if (clone_flags & CLONE_PIDFD) {
int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;
- /* Don't create pidfds for kernel threads for now. */
- if (args->kthread) {
- retval = -EINVAL;
- goto bad_fork_free_pid;
- }
-
/* Note that no task has been attached to @pid yet. */
retval = __pidfd_prepare(pid, flags, &pidfile);
if (retval < 0)
diff --git a/lib/vdso/getrandom.c b/lib/vdso/getrandom.c
index b230f0b..e1db228 100644
--- a/lib/vdso/getrandom.c
+++ b/lib/vdso/getrandom.c
@@ -85,6 +85,10 @@
if (unlikely(((unsigned long)opaque_state & ~PAGE_MASK) + sizeof(*state) > PAGE_SIZE))
return -EFAULT;
+ /* Handle unexpected flags by falling back to the kernel. */
+ if (unlikely(flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)))
+ goto fallback_syscall;
+
/* If the caller passes the wrong size, which might happen due to CRIU, fallback. */
if (unlikely(opaque_len != sizeof(*state)))
goto fallback_syscall;
diff --git a/mm/truncate.c b/mm/truncate.c
index 4d61fbd..0668cd3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -157,7 +157,7 @@
if (folio_mapped(folio))
unmap_mapping_folio(folio);
- if (folio_has_private(folio))
+ if (folio_needs_release(folio))
folio_invalidate(folio, 0, folio_size(folio));
/*
@@ -219,7 +219,7 @@
if (!mapping_inaccessible(folio->mapping))
folio_zero_range(folio, offset, length);
- if (folio_has_private(folio))
+ if (folio_needs_release(folio))
folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index f25a21f..d6976db 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2406,10 +2406,16 @@
/* To avoid a potential race with hci_unregister_dev. */
hci_dev_hold(hdev);
- if (action == PM_SUSPEND_PREPARE)
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
ret = hci_suspend_dev(hdev);
- else if (action == PM_POST_SUSPEND)
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
ret = hci_resume_dev(hdev);
+ break;
+ }
if (ret)
bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 0e2084c..444f23e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -235,7 +235,7 @@
if (!rtnl_trylock())
return restart_syscall();
- if (netif_running(netdev) && netif_device_present(netdev)) {
+ if (netif_running(netdev)) {
struct ethtool_link_ksettings cmd;
if (!__ethtool_get_link_ksettings(netdev, &cmd))
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index ea55a75..197a50e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3654,7 +3654,7 @@
struct pktgen_dev *pkt_dev = NULL;
int cpu = t->cpu;
- WARN_ON(smp_processor_id() != cpu);
+ WARN_ON_ONCE(smp_processor_id() != cpu);
init_waitqueue_head(&t->queue);
complete(&t->start_done);
@@ -3989,6 +3989,7 @@
goto remove;
}
+ cpus_read_lock();
for_each_online_cpu(cpu) {
int err;
@@ -3997,6 +3998,7 @@
pr_warn("Cannot create thread for cpu %d (%d)\n",
cpu, err);
}
+ cpus_read_unlock();
if (list_empty(&pn->pktgen_threads)) {
pr_err("Initialization failed for all threads\n");
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index e18823b..ae041f5 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -442,6 +442,9 @@
if (!dev->ethtool_ops->get_link_ksettings)
return -EOPNOTSUPP;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
memset(link_ksettings, 0, sizeof(*link_ksettings));
return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e03a342..831a18d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4637,6 +4637,13 @@
/* Don't race with userspace socket closes such as tcp_close. */
lock_sock(sk);
+ /* Avoid closing the same socket twice. */
+ if (sk->sk_state == TCP_CLOSE) {
+ if (!has_current_bpf_ctx())
+ release_sock(sk);
+ return -ENOENT;
+ }
+
if (sk->sk_state == TCP_LISTEN) {
tcp_set_state(sk, TCP_CLOSE);
inet_csk_listen_stop(sk);
@@ -4646,16 +4653,13 @@
local_bh_disable();
bh_lock_sock(sk);
- if (!sock_flag(sk, SOCK_DEAD)) {
- if (tcp_need_reset(sk->sk_state))
- tcp_send_active_reset(sk, GFP_ATOMIC,
- SK_RST_REASON_NOT_SPECIFIED);
- tcp_done_with_error(sk, err);
- }
+ if (tcp_need_reset(sk->sk_state))
+ tcp_send_active_reset(sk, GFP_ATOMIC,
+ SK_RST_REASON_NOT_SPECIFIED);
+ tcp_done_with_error(sk, err);
bh_unlock_sock(sk);
local_bh_enable();
- tcp_write_queue_purge(sk);
if (!has_current_bpf_ctx())
release_sock(sk);
return 0;
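
In the tcp.c hunk, tcp_abort() now returns -ENOENT when the socket is already in TCP_CLOSE, so aborting the same socket twice becomes a no-op; the SOCK_DEAD check and the explicit write-queue purge are dropped. Reduced to its essence it is an idempotent-abort guard (standalone illustration, not kernel code):

#include <errno.h>
#include <stdbool.h>

struct conn {
        bool closed;
};

/* Second and later calls report -ENOENT instead of repeating the
 * teardown, mirroring the TCP_CLOSE check added above. */
static int conn_abort(struct conn *c)
{
        if (c->closed)
                return -ENOENT;
        c->closed = true;
        /* ... send reset, report the error to the owner ... */
        return 0;
}

int main(void)
{
        struct conn c = { .closed = false };

        conn_abort(&c);
        return conn_abort(&c) == -ENOENT ? 0 : 1;
}
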
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4779a18..f9526bb 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6664,7 +6664,7 @@
return true;
/* hidden SSID: zeroed out */
- if (memcmp(elems->ssid, zero_ssid, elems->ssid_len))
+ if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len))
return false;
return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index edba4a3..bca7b34 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5348,8 +5348,10 @@
if (beacon->tail)
skb_put_data(skb, beacon->tail, beacon->tail_len);
- if (ieee80211_beacon_protect(skb, local, sdata, link) < 0)
+ if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) {
+ dev_kfree_skb(skb);
return NULL;
+ }
ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb,
chanctx_conf, csa_off_base);
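
The mac80211/tx.c fix frees the beacon skb when ieee80211_beacon_protect() fails instead of leaking it on the early return. It is the usual free-on-error-path rule; a plain C illustration (not mac80211 code):

#include <stdlib.h>

/* The allocator owns the buffer on every failure exit. */
static unsigned char *build_frame(size_t len,
                                  int (*protect)(unsigned char *, size_t))
{
        unsigned char *frame = malloc(len);

        if (!frame)
                return NULL;
        if (protect(frame, len) < 0) {
                free(frame);    /* matches the dev_kfree_skb() added above */
                return NULL;
        }
        return frame;
}
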
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index ad28da6..a29ff90 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -68,12 +68,12 @@
skb = skb_peek_tail(&sk->sk_receive_queue);
if (skb) {
WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
- pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,
+ pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk,
MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq,
MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);
MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq;
MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;
}
- pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
+ pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq);
}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index ac2f1a5..370c383 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -117,7 +117,7 @@
mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
ptr += 2;
}
- pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u",
+ pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u\n",
version, flags, opsize, mp_opt->sndr_key,
mp_opt->rcvr_key, mp_opt->data_len, mp_opt->csum);
break;
@@ -131,7 +131,7 @@
ptr += 4;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
- pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
+ pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->token, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
@@ -142,19 +142,19 @@
ptr += 8;
mp_opt->nonce = get_unaligned_be32(ptr);
ptr += 4;
- pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
mp_opt->backup, mp_opt->join_id,
mp_opt->thmac, mp_opt->nonce);
} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
mp_opt->suboptions |= OPTION_MPTCP_MPJ_ACK;
ptr += 2;
memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
- pr_debug("MP_JOIN hmac");
+ pr_debug("MP_JOIN hmac\n");
}
break;
case MPTCPOPT_DSS:
- pr_debug("DSS");
+ pr_debug("DSS\n");
ptr++;
/* we must clear 'mpc_map' be able to detect MP_CAPABLE
@@ -169,7 +169,7 @@
mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
- pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
+ pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
mp_opt->data_fin, mp_opt->dsn64,
mp_opt->use_map, mp_opt->ack64,
mp_opt->use_ack);
@@ -207,7 +207,7 @@
ptr += 4;
}
- pr_debug("data_ack=%llu", mp_opt->data_ack);
+ pr_debug("data_ack=%llu\n", mp_opt->data_ack);
}
if (mp_opt->use_map) {
@@ -231,7 +231,7 @@
ptr += 2;
}
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
mp_opt->data_seq, mp_opt->subflow_seq,
mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD),
mp_opt->csum);
@@ -293,7 +293,7 @@
mp_opt->ahmac = get_unaligned_be64(ptr);
ptr += 8;
}
- pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d",
+ pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d\n",
(mp_opt->addr.family == AF_INET6) ? "6" : "",
mp_opt->addr.id, mp_opt->ahmac, mp_opt->echo, ntohs(mp_opt->addr.port));
break;
@@ -309,7 +309,7 @@
mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE;
for (i = 0; i < mp_opt->rm_list.nr; i++)
mp_opt->rm_list.ids[i] = *ptr++;
- pr_debug("RM_ADDR: rm_list_nr=%d", mp_opt->rm_list.nr);
+ pr_debug("RM_ADDR: rm_list_nr=%d\n", mp_opt->rm_list.nr);
break;
case MPTCPOPT_MP_PRIO:
@@ -318,7 +318,7 @@
mp_opt->suboptions |= OPTION_MPTCP_PRIO;
mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP;
- pr_debug("MP_PRIO: prio=%d", mp_opt->backup);
+ pr_debug("MP_PRIO: prio=%d\n", mp_opt->backup);
break;
case MPTCPOPT_MP_FASTCLOSE:
@@ -329,7 +329,7 @@
mp_opt->rcvr_key = get_unaligned_be64(ptr);
ptr += 8;
mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE;
- pr_debug("MP_FASTCLOSE: recv_key=%llu", mp_opt->rcvr_key);
+ pr_debug("MP_FASTCLOSE: recv_key=%llu\n", mp_opt->rcvr_key);
break;
case MPTCPOPT_RST:
@@ -343,7 +343,7 @@
flags = *ptr++;
mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT;
mp_opt->reset_reason = *ptr;
- pr_debug("MP_RST: transient=%u reason=%u",
+ pr_debug("MP_RST: transient=%u reason=%u\n",
mp_opt->reset_transient, mp_opt->reset_reason);
break;
@@ -354,7 +354,7 @@
ptr += 2;
mp_opt->suboptions |= OPTION_MPTCP_FAIL;
mp_opt->fail_seq = get_unaligned_be64(ptr);
- pr_debug("MP_FAIL: data_seq=%llu", mp_opt->fail_seq);
+ pr_debug("MP_FAIL: data_seq=%llu\n", mp_opt->fail_seq);
break;
default:
@@ -417,7 +417,7 @@
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
- pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
+ pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
subflow->local_nonce);
opts->suboptions = OPTION_MPTCP_MPJ_SYN;
opts->join_id = subflow->local_id;
@@ -500,7 +500,7 @@
*size = TCPOLEN_MPTCP_MPC_ACK;
}
- pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
+ pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
subflow, subflow->local_key, subflow->remote_key,
data_len);
@@ -509,7 +509,7 @@
opts->suboptions = OPTION_MPTCP_MPJ_ACK;
memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
*size = TCPOLEN_MPTCP_MPJ_ACK;
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
/* we can use the full delegate action helper only from BH context
* If we are in process context - sk is flushing the backlog at
@@ -675,7 +675,7 @@
*size = len;
if (drop_other_suboptions) {
- pr_debug("drop other suboptions");
+ pr_debug("drop other suboptions\n");
opts->suboptions = 0;
/* note that e.g. DSS could have written into the memory
@@ -695,7 +695,7 @@
} else {
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX);
}
- pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d",
+ pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d\n",
opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port));
return true;
@@ -726,7 +726,7 @@
opts->rm_list = rm_list;
for (i = 0; i < opts->rm_list.nr; i++)
- pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]);
+ pr_debug("rm_list_ids[%d]=%d\n", i, opts->rm_list.ids[i]);
MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr);
return true;
}
@@ -752,7 +752,7 @@
opts->suboptions |= OPTION_MPTCP_PRIO;
opts->backup = subflow->request_bkup;
- pr_debug("prio=%d", opts->backup);
+ pr_debug("prio=%d\n", opts->backup);
return true;
}
@@ -794,7 +794,7 @@
opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
opts->rcvr_key = READ_ONCE(msk->remote_key);
- pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
+ pr_debug("FASTCLOSE key=%llu\n", opts->rcvr_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);
return true;
}
@@ -816,7 +816,7 @@
opts->suboptions |= OPTION_MPTCP_FAIL;
opts->fail_seq = subflow->map_seq;
- pr_debug("MP_FAIL fail_seq=%llu", opts->fail_seq);
+ pr_debug("MP_FAIL fail_seq=%llu\n", opts->fail_seq);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
return true;
@@ -904,7 +904,7 @@
opts->csum_reqd = subflow_req->csum_reqd;
opts->allow_join_id0 = subflow_req->allow_join_id0;
*size = TCPOLEN_MPTCP_MPC_SYNACK;
- pr_debug("subflow_req=%p, local_key=%llu",
+ pr_debug("subflow_req=%p, local_key=%llu\n",
subflow_req, subflow_req->local_key);
return true;
} else if (subflow_req->mp_join) {
@@ -913,7 +913,7 @@
opts->join_id = subflow_req->local_id;
opts->thmac = subflow_req->thmac;
opts->nonce = subflow_req->local_nonce;
- pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
+ pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
subflow_req, opts->backup, opts->join_id,
opts->thmac, opts->nonce);
*size = TCPOLEN_MPTCP_MPJ_SYNACK;
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 3e6e0f5..37f6dbc 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -19,7 +19,7 @@
{
u8 add_addr = READ_ONCE(msk->pm.addr_signal);
- pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);
+ pr_debug("msk=%p, local_id=%d, echo=%d\n", msk, addr->id, echo);
lockdep_assert_held(&msk->pm.lock);
@@ -45,7 +45,7 @@
{
u8 rm_addr = READ_ONCE(msk->pm.addr_signal);
- pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);
+ pr_debug("msk=%p, rm_list_nr=%d\n", msk, rm_list->nr);
if (rm_addr) {
MPTCP_ADD_STATS(sock_net((struct sock *)msk),
@@ -66,7 +66,7 @@
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side);
+ pr_debug("msk=%p, token=%u side=%d\n", msk, READ_ONCE(msk->token), server_side);
WRITE_ONCE(pm->server_side, server_side);
mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
@@ -90,7 +90,7 @@
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
+ pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
subflows_max, READ_ONCE(pm->accept_subflow));
/* try to avoid acquiring the lock below */
@@ -114,7 +114,7 @@
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
enum mptcp_pm_status new_status)
{
- pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
+ pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
BIT(new_status));
if (msk->pm.status & BIT(new_status))
return false;
@@ -129,7 +129,7 @@
struct mptcp_pm_data *pm = &msk->pm;
bool announce = false;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&pm->lock);
@@ -153,14 +153,14 @@
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
}
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (!READ_ONCE(pm->work_pending))
return;
@@ -212,7 +212,7 @@
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
+ pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
READ_ONCE(pm->accept_addr));
mptcp_event_addr_announced(ssk, addr);
@@ -226,7 +226,9 @@
} else {
__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
- } else if (!READ_ONCE(pm->accept_addr)) {
+ /* id0 should not have a different address */
+ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
+ (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
mptcp_pm_announce_addr(msk, addr, true);
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
@@ -243,7 +245,7 @@
{
struct mptcp_pm_data *pm = &msk->pm;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&pm->lock);
@@ -267,7 +269,7 @@
struct mptcp_pm_data *pm = &msk->pm;
u8 i;
- pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);
+ pr_debug("msk=%p remote_ids_nr=%d\n", msk, rm_list->nr);
for (i = 0; i < rm_list->nr; i++)
mptcp_event_addr_removed(msk, rm_list->ids[i]);
@@ -299,19 +301,19 @@
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
- pr_debug("fail_seq=%llu", fail_seq);
+ pr_debug("fail_seq=%llu\n", fail_seq);
if (!READ_ONCE(msk->allow_infinite_fallback))
return;
if (!subflow->fail_tout) {
- pr_debug("send MP_FAIL response and infinite map");
+ pr_debug("send MP_FAIL response and infinite map\n");
subflow->send_mp_fail = 1;
subflow->send_infinite_map = 1;
tcp_send_ack(sk);
} else {
- pr_debug("MP_FAIL response received");
+ pr_debug("MP_FAIL response received\n");
WRITE_ONCE(subflow->fail_tout, 0);
}
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 3e4ad80..f891bc7 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -130,12 +130,15 @@
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
- struct sock_common *skc;
list_for_each_entry(subflow, list, node) {
- skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- remote_address(skc, &cur);
+ if (!((1 << inet_sk_state_load(ssk)) &
+ (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
+ continue;
+
+ remote_address((struct sock_common *)ssk, &cur);
if (mptcp_addresses_equal(&cur, daddr, daddr->port))
return true;
}
@@ -287,7 +290,7 @@
struct mptcp_sock *msk = entry->sock;
struct sock *sk = (struct sock *)msk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (!msk)
return;
@@ -306,7 +309,7 @@
spin_lock_bh(&msk->pm.lock);
if (!mptcp_pm_should_add_signal_addr(msk)) {
- pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
+ pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
mptcp_pm_announce_addr(msk, &entry->addr, false);
mptcp_pm_add_addr_send_ack(msk);
entry->retrans_times++;
@@ -387,7 +390,7 @@
struct sock *sk = (struct sock *)msk;
LIST_HEAD(free_list);
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
spin_lock_bh(&msk->pm.lock);
list_splice_init(&msk->pm.anno_list, &free_list);
@@ -473,7 +476,7 @@
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow;
- pr_debug("send ack for %s",
+ pr_debug("send ack for %s\n",
prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr"));
slow = lock_sock_fast(ssk);
@@ -585,6 +588,11 @@
__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
msk->pm.add_addr_signaled++;
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
+
mptcp_pm_announce_addr(msk, &local.addr, false);
mptcp_pm_nl_addr_send_ack(msk);
@@ -607,8 +615,14 @@
fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
- msk->pm.local_addr_used++;
__clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
+ else /* local_addr_used is not decr for ID 0 */
+ msk->pm.local_addr_used++;
+
nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
if (nr == 0)
continue;
@@ -708,7 +722,7 @@
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
- pr_debug("accepted %d:%d remote family %d",
+ pr_debug("accepted %d:%d remote family %d\n",
msk->pm.add_addr_accepted, add_addr_accept_max,
msk->pm.remote.family);
@@ -737,13 +751,24 @@
spin_lock_bh(&msk->pm.lock);
if (sf_created) {
- msk->pm.add_addr_accepted++;
+ /* add_addr_accepted is not decr for ID 0 */
+ if (remote.id)
+ msk->pm.add_addr_accepted++;
if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
msk->pm.subflows >= subflows_max)
WRITE_ONCE(msk->pm.accept_addr, false);
}
}
+bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *remote)
+{
+ struct mptcp_addr_info mpc_remote;
+
+ remote_address((struct sock_common *)msk, &mpc_remote);
+ return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
+}
+
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow;
@@ -755,9 +780,12 @@
!mptcp_pm_should_rm_signal(msk))
return;
- subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
- if (subflow)
- mptcp_pm_send_ack(msk, subflow, false, false);
+ mptcp_for_each_subflow(msk, subflow) {
+ if (__mptcp_subflow_active(subflow)) {
+ mptcp_pm_send_ack(msk, subflow, false, false);
+ break;
+ }
+ }
}
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
@@ -767,7 +795,7 @@
{
struct mptcp_subflow_context *subflow;
- pr_debug("bkup=%d", bkup);
+ pr_debug("bkup=%d\n", bkup);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -790,11 +818,6 @@
return -EINVAL;
}
-static bool mptcp_local_id_match(const struct mptcp_sock *msk, u8 local_id, u8 id)
-{
- return local_id == id || (!local_id && msk->mpc_endpoint_id == id);
-}
-
static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list,
enum linux_mptcp_mib_field rm_type)
@@ -803,7 +826,7 @@
struct sock *sk = (struct sock *)msk;
u8 i;
- pr_debug("%s rm_list_nr %d",
+ pr_debug("%s rm_list_nr %d\n",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
msk_owned_by_me(msk);
@@ -827,12 +850,14 @@
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
u8 id = subflow_get_local_id(subflow);
+ if (inet_sk_state_load(ssk) == TCP_CLOSE)
+ continue;
if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
continue;
- if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+ if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id)
continue;
- pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
+ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n",
rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
i, rm_id, id, remote_id, msk->mpc_endpoint_id);
spin_unlock_bh(&msk->pm.lock);
@@ -889,7 +914,7 @@
spin_lock_bh(&msk->pm.lock);
- pr_debug("msk=%p status=%x", msk, pm->status);
+ pr_debug("msk=%p status=%x\n", msk, pm->status);
if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
@@ -1307,20 +1332,27 @@
return pm_nl_get_pernet(genl_info_net(info));
}
-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
+static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
+ struct mptcp_addr_info *addr)
{
struct mptcp_sock *msk;
long s_slot = 0, s_num = 0;
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
+ struct mptcp_addr_info mpc_addr;
if (!READ_ONCE(msk->fully_established) ||
mptcp_pm_is_userspace(msk))
goto next;
+ /* if the endp linked to the init sf is re-added with a != ID */
+ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+
lock_sock(sk);
spin_lock_bh(&msk->pm.lock);
+ if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
+ msk->mpc_endpoint_id = addr->id;
mptcp_pm_create_subflow_or_signal_addr(msk);
spin_unlock_bh(&msk->pm.lock);
release_sock(sk);
@@ -1393,7 +1425,7 @@
goto out_free;
}
- mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
+ mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
return 0;
out_free:
@@ -1438,6 +1470,12 @@
return false;
}
+static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
+{
+ return msk->mpc_endpoint_id == addr->id ? 0 : addr->id;
+}
+
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr,
bool force)
@@ -1445,7 +1483,7 @@
struct mptcp_rm_list list = { .nr = 0 };
bool ret;
- list.ids[list.nr++] = addr->id;
+ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
ret = remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
@@ -1472,13 +1510,11 @@
const struct mptcp_pm_addr_entry *entry)
{
const struct mptcp_addr_info *addr = &entry->addr;
- struct mptcp_rm_list list = { .nr = 0 };
+ struct mptcp_rm_list list = { .nr = 1 };
long s_slot = 0, s_num = 0;
struct mptcp_sock *msk;
- pr_debug("remove_id=%d", addr->id);
-
- list.ids[list.nr++] = addr->id;
+ pr_debug("remove_id=%d\n", addr->id);
while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
struct sock *sk = (struct sock *)msk;
@@ -1497,6 +1533,7 @@
mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
!(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+ list.ids[0] = mptcp_endp_get_local_id(msk, addr);
if (remove_subflow) {
spin_lock_bh(&msk->pm.lock);
mptcp_pm_nl_rm_subflow_received(msk, &list);
@@ -1509,6 +1546,8 @@
spin_unlock_bh(&msk->pm.lock);
}
+ if (msk->mpc_endpoint_id == entry->addr.id)
+ msk->mpc_endpoint_id = 0;
release_sock(sk);
next:
@@ -1603,6 +1642,7 @@
return ret;
}
+/* Called from the userspace PM only */
void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 };
@@ -1631,6 +1671,7 @@
}
}
+/* Called from the in-kernel PM only */
static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct list_head *rm_list)
{
@@ -1640,11 +1681,11 @@
list_for_each_entry(entry, rm_list, list) {
if (slist.nr < MPTCP_RM_IDS_MAX &&
lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
- slist.ids[slist.nr++] = entry->addr.id;
+ slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
if (alist.nr < MPTCP_RM_IDS_MAX &&
remove_anno_list_by_saddr(msk, &entry->addr))
- alist.ids[alist.nr++] = entry->addr.id;
+ alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr);
}
spin_lock_bh(&msk->pm.lock);
@@ -1941,7 +1982,7 @@
{
struct mptcp_rm_list list = { .nr = 0 };
- list.ids[list.nr++] = addr->id;
+ list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr);
spin_lock_bh(&msk->pm.lock);
mptcp_pm_nl_rm_subflow_received(msk, &list);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 0d536b1..37ebcb7 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -139,7 +139,7 @@
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;
- pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
+ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
@@ -217,7 +217,7 @@
end_seq = MPTCP_SKB_CB(skb)->end_seq;
max_seq = atomic64_read(&msk->rcv_wnd_sent);
- pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
+ pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
RB_EMPTY_ROOT(&msk->out_of_order_queue));
if (after64(end_seq, max_seq)) {
/* out of window */
@@ -643,7 +643,7 @@
}
}
- pr_debug("msk=%p ssk=%p", msk, ssk);
+ pr_debug("msk=%p ssk=%p\n", msk, ssk);
tp = tcp_sk(ssk);
do {
u32 map_remaining, offset;
@@ -724,7 +724,7 @@
u64 end_seq;
p = rb_first(&msk->out_of_order_queue);
- pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
while (p) {
skb = rb_to_skb(p);
if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
@@ -746,7 +746,7 @@
int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
/* skip overlapping data, if any */
- pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
+ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
delta);
MPTCP_SKB_CB(skb)->offset += delta;
@@ -1240,7 +1240,7 @@
size_t copy;
int i;
- pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
+ pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n",
msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
if (WARN_ON_ONCE(info->sent > info->limit ||
@@ -1341,7 +1341,7 @@
mpext->use_map = 1;
mpext->dsn64 = 1;
- pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
+ pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
mpext->data_seq, mpext->subflow_seq, mpext->data_len,
mpext->dsn64);
@@ -1892,7 +1892,7 @@
if (!msk->first_pending)
WRITE_ONCE(msk->first_pending, dfrag);
}
- pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
+ pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
!dfrag_collapsed);
@@ -2248,7 +2248,7 @@
}
}
- pr_debug("block timeout %ld", timeo);
+ pr_debug("block timeout %ld\n", timeo);
sk_wait_data(sk, &timeo, NULL);
}
@@ -2264,7 +2264,7 @@
}
}
- pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
+ pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n",
msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
skb_queue_empty(&msk->receive_queue), copied);
if (!(flags & MSG_PEEK))
@@ -2326,7 +2326,7 @@
continue;
}
- if (subflow->backup) {
+ if (subflow->backup || subflow->request_bkup) {
if (!backup)
backup = ssk;
continue;
@@ -2508,6 +2508,12 @@
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
struct mptcp_subflow_context *subflow)
{
+ /* The first subflow can already be closed and still in the list */
+ if (subflow->close_event_done)
+ return;
+
+ subflow->close_event_done = true;
+
if (sk->sk_state == TCP_ESTABLISHED)
mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
@@ -2533,8 +2539,11 @@
mptcp_for_each_subflow_safe(msk, subflow, tmp) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int ssk_state = inet_sk_state_load(ssk);
- if (inet_sk_state_load(ssk) != TCP_CLOSE)
+ if (ssk_state != TCP_CLOSE &&
+ (ssk_state != TCP_CLOSE_WAIT ||
+ inet_sk_state_load(sk) != TCP_ESTABLISHED))
continue;
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2714,7 +2723,7 @@
if (!ssk)
return;
- pr_debug("MP_FAIL doesn't respond, reset the subflow");
+ pr_debug("MP_FAIL doesn't respond, reset the subflow\n");
slow = lock_sock_fast(ssk);
mptcp_subflow_reset(ssk);
@@ -2888,7 +2897,7 @@
break;
default:
if (__mptcp_check_fallback(mptcp_sk(sk))) {
- pr_debug("Fallback");
+ pr_debug("Fallback\n");
ssk->sk_shutdown |= how;
tcp_shutdown(ssk, how);
@@ -2898,7 +2907,7 @@
WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
mptcp_schedule_work(sk);
} else {
- pr_debug("Sending DATA_FIN on subflow %p", ssk);
+ pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
tcp_send_ack(ssk);
if (!mptcp_rtx_timer_pending(sk))
mptcp_reset_rtx_timer(sk);
@@ -2964,7 +2973,7 @@
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
+ pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n",
msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
msk->snd_nxt, msk->write_seq);
@@ -2988,7 +2997,7 @@
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
+ pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n",
msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
!!mptcp_send_head(sk));
@@ -3003,7 +3012,7 @@
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
might_sleep();
@@ -3111,7 +3120,7 @@
mptcp_set_state(sk, TCP_CLOSE);
sock_hold(sk);
- pr_debug("msk=%p state=%d", sk, sk->sk_state);
+ pr_debug("msk=%p state=%d\n", sk, sk->sk_state);
if (msk->token)
mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
@@ -3543,7 +3552,7 @@
{
struct mptcp_sock *msk = mptcp_sk(sk);
- pr_debug("msk=%p, ssk=%p", msk, msk->first);
+ pr_debug("msk=%p, ssk=%p\n", msk, msk->first);
if (WARN_ON_ONCE(!msk->first))
return -EINVAL;
@@ -3560,7 +3569,7 @@
sk = subflow->conn;
msk = mptcp_sk(sk);
- pr_debug("msk=%p, token=%u", sk, subflow->token);
+ pr_debug("msk=%p, token=%u\n", sk, subflow->token);
subflow->map_seq = subflow->iasn;
subflow->map_subflow_seq = 1;
@@ -3589,7 +3598,7 @@
struct sock *parent = (void *)msk;
bool ret = true;
- pr_debug("msk=%p, subflow=%p", msk, subflow);
+ pr_debug("msk=%p, subflow=%p\n", msk, subflow);
/* mptcp socket already closing? */
if (!mptcp_is_fully_established(parent)) {
@@ -3635,7 +3644,7 @@
static void mptcp_shutdown(struct sock *sk, int how)
{
- pr_debug("sk=%p, how=%d", sk, how);
+ pr_debug("sk=%p, how=%d\n", sk, how);
if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
__mptcp_wr_shutdown(sk);
@@ -3856,7 +3865,7 @@
struct sock *ssk;
int err;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
lock_sock(sk);
@@ -3895,7 +3904,7 @@
struct mptcp_sock *msk = mptcp_sk(sock->sk);
struct sock *ssk, *newsk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
/* Buggy applications can call accept on socket states other then LISTEN
* but no need to allocate the first subflow just to error out.
@@ -3904,12 +3913,12 @@
if (!ssk)
return -EINVAL;
- pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
+ pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk));
newsk = inet_csk_accept(ssk, arg);
if (!newsk)
return arg->err;
- pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
+ pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk));
if (sk_is_mptcp(newsk)) {
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
@@ -4002,7 +4011,7 @@
sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
- pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
+ pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
if (state == TCP_LISTEN) {
struct sock *ssk = READ_ONCE(msk->first);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a1c1b0f..3b22313 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -524,7 +524,8 @@
stale : 1, /* unable to snd/rcv data, do not use for xmit */
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
- __unused : 10;
+ close_event_done : 1, /* has done the post-closed part */
+ __unused : 9;
bool data_avail;
bool scheduled;
u32 remote_nonce;
@@ -992,6 +993,8 @@
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
+bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *remote);
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
@@ -1177,7 +1180,7 @@
static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
{
if (__mptcp_check_fallback(msk)) {
- pr_debug("TCP fallback already done (msk=%p)", msk);
+ pr_debug("TCP fallback already done (msk=%p)\n", msk);
return;
}
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
@@ -1213,7 +1216,7 @@
}
}
-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
+#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 4a7fd05..78ed508 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -86,7 +86,7 @@
list_add_tail_rcu(&sched->list, &mptcp_sched_list);
spin_unlock(&mptcp_sched_list_lock);
- pr_debug("%s registered", sched->name);
+ pr_debug("%s registered\n", sched->name);
return 0;
}
@@ -118,7 +118,7 @@
if (msk->sched->init)
msk->sched->init(msk);
- pr_debug("sched=%s", msk->sched->name);
+ pr_debug("sched=%s\n", msk->sched->name);
return 0;
}
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 2026a9a..505445a 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -873,7 +873,7 @@
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
if (level == SOL_SOCKET)
return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
@@ -1453,7 +1453,7 @@
struct mptcp_sock *msk = mptcp_sk(sk);
struct sock *ssk;
- pr_debug("msk=%p", msk);
+ pr_debug("msk=%p\n", msk);
/* @@ the meaning of setsockopt() when the socket is connected and
* there are multiple subflows is not yet defined. It is up to the
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index a21c712..064ab32 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -39,7 +39,7 @@
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
- pr_debug("subflow_req=%p", subflow_req);
+ pr_debug("subflow_req=%p\n", subflow_req);
if (subflow_req->msk)
sock_put((struct sock *)subflow_req->msk);
@@ -146,7 +146,7 @@
struct mptcp_options_received mp_opt;
bool opt_mp_capable, opt_mp_join;
- pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
+ pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -221,7 +221,7 @@
}
if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
- pr_debug("syn inet_sport=%d %d",
+ pr_debug("syn inet_sport=%d %d\n",
ntohs(inet_sk(sk_listener)->inet_sport),
ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
@@ -243,7 +243,7 @@
subflow_init_req_cookie_join_save(subflow_req, skb);
}
- pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+ pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
subflow_req->remote_nonce, subflow_req->msk);
}
@@ -527,7 +527,7 @@
subflow->rel_write_seq = 1;
subflow->conn_finished = 1;
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
- pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
+ pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
@@ -559,7 +559,7 @@
subflow->thmac = mp_opt.thmac;
subflow->remote_nonce = mp_opt.nonce;
WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
- pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
+ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d\n",
subflow, subflow->thmac, subflow->remote_nonce,
subflow->backup);
@@ -585,7 +585,7 @@
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
if (subflow_use_different_dport(msk, sk)) {
- pr_debug("synack inet_dport=%d %d",
+ pr_debug("synack inet_dport=%d %d\n",
ntohs(inet_sk(sk)->inet_dport),
ntohs(inet_sk(parent)->inet_dport));
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
@@ -655,7 +655,7 @@
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
/* Never answer to SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -686,7 +686,7 @@
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
if (skb->protocol == htons(ETH_P_IP))
return subflow_v4_conn_request(sk, skb);
@@ -807,7 +807,7 @@
struct mptcp_sock *owner;
struct sock *child;
- pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+ pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
/* After child creation we must look for MPC even when options
* are not parsed
@@ -898,7 +898,7 @@
ctx->conn = (struct sock *)owner;
if (subflow_use_different_sport(owner, sk)) {
- pr_debug("ack inet_sport=%d %d",
+ pr_debug("ack inet_sport=%d %d\n",
ntohs(inet_sk(sk)->inet_sport),
ntohs(inet_sk((struct sock *)owner)->inet_sport));
if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
@@ -961,7 +961,7 @@
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
- pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
+ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
ssn, subflow->map_subflow_seq, subflow->map_data_len);
}
@@ -1121,7 +1121,7 @@
data_len = mpext->data_len;
if (data_len == 0) {
- pr_debug("infinite mapping received");
+ pr_debug("infinite mapping received\n");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
subflow->map_data_len = 0;
return MAPPING_INVALID;
@@ -1133,7 +1133,7 @@
if (data_len == 1) {
bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
mpext->dsn64);
- pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
+ pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
if (subflow->map_valid) {
/* A DATA_FIN might arrive in a DSS
* option before the previous mapping
@@ -1159,7 +1159,7 @@
data_fin_seq &= GENMASK_ULL(31, 0);
mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
- pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
+ pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
data_fin_seq, mpext->dsn64);
/* Adjust for DATA_FIN using 1 byte of sequence space */
@@ -1205,7 +1205,7 @@
if (unlikely(subflow->map_csum_reqd != csum_reqd))
return MAPPING_INVALID;
- pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
+ pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u\n",
subflow->map_seq, subflow->map_subflow_seq,
subflow->map_data_len, subflow->map_csum_reqd,
subflow->map_data_csum);
@@ -1240,7 +1240,7 @@
avail_len = skb->len - offset;
incr = limit >= avail_len ? avail_len + fin : limit;
- pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
+ pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
offset, subflow->map_subflow_seq);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
tcp_sk(ssk)->copied_seq += incr;
@@ -1255,12 +1255,16 @@
/* sched mptcp worker to remove the subflow if no more data is pending */
static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
{
- if (likely(ssk->sk_state != TCP_CLOSE))
+ struct sock *sk = (struct sock *)msk;
+
+ if (likely(ssk->sk_state != TCP_CLOSE &&
+ (ssk->sk_state != TCP_CLOSE_WAIT ||
+ inet_sk_state_load(sk) != TCP_ESTABLISHED)))
return;
if (skb_queue_empty(&ssk->sk_receive_queue) &&
!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
- mptcp_schedule_work((struct sock *)msk);
+ mptcp_schedule_work(sk);
}
static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
@@ -1337,7 +1341,7 @@
old_ack = READ_ONCE(msk->ack_seq);
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
- pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
+ pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
ack_seq);
if (unlikely(before64(ack_seq, old_ack))) {
mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
@@ -1409,7 +1413,7 @@
subflow->map_valid = 0;
WRITE_ONCE(subflow->data_avail, false);
- pr_debug("Done with mapping: seq=%u data_len=%u",
+ pr_debug("Done with mapping: seq=%u data_len=%u\n",
subflow->map_subflow_seq,
subflow->map_data_len);
}
@@ -1519,7 +1523,7 @@
target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
- pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
+ pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
if (likely(icsk->icsk_af_ops == target))
@@ -1612,7 +1616,7 @@
goto failed;
mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
- pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
remote_token, local_id, remote_id);
subflow->remote_token = remote_token;
WRITE_ONCE(subflow->remote_id, remote_id);
@@ -1747,7 +1751,7 @@
SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
subflow = mptcp_subflow_ctx(sf->sk);
- pr_debug("subflow=%p", subflow);
+ pr_debug("subflow=%p\n", subflow);
*new_sock = sf;
sock_hold(sk);
@@ -1776,7 +1780,7 @@
INIT_LIST_HEAD(&ctx->node);
INIT_LIST_HEAD(&ctx->delegated_node);
- pr_debug("subflow=%p", ctx);
+ pr_debug("subflow=%p\n", ctx);
ctx->tcp_sock = sk;
WRITE_ONCE(ctx->local_id, -1);
@@ -1927,7 +1931,7 @@
goto out;
}
- pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
+ pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
tp->is_mptcp = 1;
ctx->icsk_af_ops = icsk->icsk_af_ops;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 2389747..19a49af 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -663,7 +663,9 @@
pband = &q->band_flows[q->band_nr];
pband->credit = min(pband->credit + pband->quantum,
pband->quantum);
- goto begin;
+ if (pband->credit > 0)
+ goto begin;
+ retry = 0;
}
if (q->time_next_delayed_flow != ~0ULL)
qdisc_watchdog_schedule_range_ns(&q->watchdog,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 5adf0c0..7d315a1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2260,12 +2260,6 @@
}
}
- /* Update socket peer label if first association. */
- if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
/* Set temp so that it won't be added into hashtable */
new_asoc->temp = 1;
@@ -2274,6 +2268,22 @@
*/
action = sctp_tietags_compare(new_asoc, asoc);
+ /* In cases C and E the association doesn't enter the ESTABLISHED
+ * state, so there is no need to call security_sctp_assoc_request().
+ */
+ switch (action) {
+ case 'A': /* Association restart. */
+ case 'B': /* Collision case B. */
+ case 'D': /* Collision case D. */
+ /* Update socket peer label if first association. */
+ if (security_sctp_assoc_request((struct sctp_association *)asoc,
+ chunk->head_skb ?: chunk->skb)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ break;
+ }
+
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 39032224..4427572 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -4015,16 +4015,6 @@
}
}
-# Block comment styles
-# Networking with an initial /*
- if ($realfile =~ m@^(drivers/net/|net/)@ &&
- $prevrawline =~ /^\+[ \t]*\/\*[ \t]*$/ &&
- $rawline =~ /^\+[ \t]*\*/ &&
- $realline > 3) { # Do not warn about the initial copyright comment block after SPDX-License-Identifier
- WARN("NETWORKING_BLOCK_COMMENT_STYLE",
- "networking block comments don't use an empty /* line, use /* Comment...\n" . $hereprev);
- }
-
# Block comments use * on subsequent lines
if ($prevline =~ /$;[ \t]*$/ && #ends in comment
$prevrawline =~ /^\+.*?\/\*/ && #starting /*
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index bfa61e0..400eca4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6660,8 +6660,8 @@
*/
static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
- return __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_SELINUX,
- ctx, ctxlen, 0);
+ return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SELINUX,
+ ctx, ctxlen, 0, NULL);
}
static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 4164699..002a1b9 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4880,8 +4880,8 @@
static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
- return __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK,
- ctx, ctxlen, 0);
+ return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK,
+ ctx, ctxlen, 0, NULL);
}
static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 8c4ee50..6be548b 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -537,6 +537,9 @@
return NULL;
if (! dest->accept_input)
goto __not_avail;
+ if (snd_seq_ev_is_ump(event))
+ return dest; /* ok - no filter checks */
+
if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
! test_bit(event->type, dest->event_filter))
goto __not_avail;
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index a9dfd62..e3ac0e2 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -1003,7 +1003,7 @@
goto err;
}
- cs35l56->base.cal_index = cs35l56->index;
+ cs35l56->base.cal_index = -1;
cs35l56_init_cs_dsp(&cs35l56->base, &cs35l56->cs_dsp);
cs35l56->cs_dsp.client_ops = &cs35l56_hda_client_ops;
diff --git a/sound/pci/hda/hda_component.c b/sound/pci/hda/hda_component.c
index 7b19cb3..b7dfdb1 100644
--- a/sound/pci/hda/hda_component.c
+++ b/sound/pci/hda/hda_component.c
@@ -141,8 +141,7 @@
int ret;
/* Init shared and component specific data */
- memset(parent, 0, sizeof(*parent));
- mutex_init(&parent->mutex);
+ memset(parent->comps, 0, sizeof(parent->comps));
parent->codec = cdc;
mutex_lock(&parent->mutex);
@@ -164,6 +163,8 @@
struct hda_scodec_match *sm;
int ret, i;
+ mutex_init(&parent->mutex);
+
for (i = 0; i < count; i++) {
sm = devm_kmalloc(dev, sizeof(*sm), GFP_KERNEL);
if (!sm)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d022a25..588738c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4930,6 +4930,30 @@
}
}
+static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay)
+{
+ if (delay <= 0)
+ delay = 75;
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ msleep(delay);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ msleep(delay);
+}
+
+static void alc_hp_enable_unmute(struct hda_codec *codec, unsigned int delay)
+{
+ if (delay <= 0)
+ delay = 75;
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ msleep(delay);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+ msleep(delay);
+}
+
static const struct coef_fw alc225_pre_hsmode[] = {
UPDATE_COEF(0x4a, 1<<8, 0),
UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
@@ -5031,6 +5055,7 @@
case 0x10ec0236:
case 0x10ec0256:
case 0x19e58326:
+ alc_hp_mute_disable(codec, 75);
alc_process_coef_fw(codec, coef0256);
break;
case 0x10ec0234:
@@ -5065,6 +5090,7 @@
case 0x10ec0295:
case 0x10ec0289:
case 0x10ec0299:
+ alc_hp_mute_disable(codec, 75);
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
@@ -5290,6 +5316,7 @@
case 0x10ec0299:
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
+ alc_hp_enable_unmute(codec, 75);
break;
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
@@ -5302,6 +5329,7 @@
alc_write_coef_idx(codec, 0x45, 0xc089);
msleep(50);
alc_process_coef_fw(codec, coef0256);
+ alc_hp_enable_unmute(codec, 75);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -5399,6 +5427,7 @@
case 0x10ec0256:
case 0x19e58326:
alc_process_coef_fw(codec, coef0256);
+ alc_hp_enable_unmute(codec, 75);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -5447,6 +5476,7 @@
alc_process_coef_fw(codec, coef0225_2);
else
alc_process_coef_fw(codec, coef0225_1);
+ alc_hp_enable_unmute(codec, 75);
break;
case 0x10ec0867:
alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
@@ -5514,6 +5544,7 @@
case 0x10ec0256:
case 0x19e58326:
alc_process_coef_fw(codec, coef0256);
+ alc_hp_enable_unmute(codec, 75);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -5551,6 +5582,7 @@
case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
+ alc_hp_enable_unmute(codec, 75);
break;
}
codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
@@ -5619,25 +5651,21 @@
alc_write_coef_idx(codec, 0x06, 0x6104);
alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
- msleep(80);
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
alc_process_coef_fw(codec, coef0255);
msleep(300);
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
-
+ if (!is_ctia) {
+ alc_write_coef_idx(codec, 0x45, 0xe089);
+ msleep(100);
+ val = alc_read_coef_idx(codec, 0x46);
+ if ((val & 0x0070) == 0x0070)
+ is_ctia = false;
+ else
+ is_ctia = true;
+ }
alc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);
alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
-
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
- msleep(80);
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
break;
case 0x10ec0234:
case 0x10ec0274:
@@ -5714,12 +5742,6 @@
case 0x10ec0295:
case 0x10ec0289:
case 0x10ec0299:
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
- msleep(80);
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
val = alc_read_coef_idx(codec, 0x45);
@@ -5736,15 +5758,19 @@
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x00f0) == 0x00f0;
}
+ if (!is_ctia) {
+ alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x38<<10);
+ alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8);
+ msleep(100);
+ val = alc_read_coef_idx(codec, 0x46);
+ if ((val & 0x00f0) == 0x00f0)
+ is_ctia = false;
+ else
+ is_ctia = true;
+ }
alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
-
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
- msleep(80);
- snd_hda_codec_write(codec, 0x21, 0,
- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
break;
case 0x10ec0867:
is_ctia = true;
@@ -10315,6 +10341,7 @@
SND_PCI_QUIRK(0x103c, 0x8c15, "HP Spectre x360 2-in-1 Laptop 14-eu0xxx", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS),
SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -10353,6 +10380,7 @@
SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS),
SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
@@ -10513,6 +10541,7 @@
SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc1ca, "Samsung Galaxy Book3 Pro 360 (NP960QFG-KB1US)", ALC298_FIXUP_SAMSUNG_AMP2),
+ SND_PCI_QUIRK(0x144d, 0xc1cc, "Samsung Galaxy Book3 Ultra (NT960XFH-XD92G))", ALC298_FIXUP_SAMSUNG_AMP2),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
diff --git a/sound/soc/amd/acp/acp-legacy-mach.c b/sound/soc/amd/acp/acp-legacy-mach.c
index 47c3b5f..0d529e3 100644
--- a/sound/soc/amd/acp/acp-legacy-mach.c
+++ b/sound/soc/amd/acp/acp-legacy-mach.c
@@ -227,6 +227,8 @@
},
{ }
};
+MODULE_DEVICE_TABLE(platform, board_ids);
+
static struct platform_driver acp_asoc_audio = {
.driver = {
.pm = &snd_soc_pm_ops,
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
index fc59ea3..b3a702d 100644
--- a/sound/soc/amd/acp/acp-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sof-mach.c
@@ -158,6 +158,8 @@
},
{ }
};
+MODULE_DEVICE_TABLE(platform, board_ids);
+
static struct platform_driver acp_asoc_audio = {
.driver = {
.name = "sof_mach",
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index 83a75a3..81abe2e 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -44,6 +44,7 @@
},
{},
};
+MODULE_DEVICE_TABLE(platform, db1200_pids);
/*------------------------- AC97 PART ---------------------------*/
diff --git a/sound/soc/codecs/cs-amp-lib-test.c b/sound/soc/codecs/cs-amp-lib-test.c
index 15f991b..8169ec8 100644
--- a/sound/soc/codecs/cs-amp-lib-test.c
+++ b/sound/soc/codecs/cs-amp-lib-test.c
@@ -38,6 +38,7 @@
{
struct cs_amp_lib_test_priv *priv = test->priv;
unsigned int blob_size;
+ int i;
blob_size = offsetof(struct cirrus_amp_efi_data, data) +
sizeof(struct cirrus_amp_cal_data) * num_amps;
@@ -49,6 +50,14 @@
priv->cal_blob->count = num_amps;
get_random_bytes(priv->cal_blob->data, sizeof(struct cirrus_amp_cal_data) * num_amps);
+
+ /* Ensure all timestamps are non-zero to mark the entry valid. */
+ for (i = 0; i < num_amps; i++)
+ priv->cal_blob->data[i].calTime[0] |= 1;
+
+ /* Ensure that all UIDs are non-zero and unique. */
+ for (i = 0; i < num_amps; i++)
+ *(u8 *)&priv->cal_blob->data[i].calTarget[0] = i + 1;
}
static u64 cs_amp_lib_test_get_target_uid(struct kunit *test)
diff --git a/sound/soc/codecs/cs-amp-lib.c b/sound/soc/codecs/cs-amp-lib.c
index 605964a..51b128c 100644
--- a/sound/soc/codecs/cs-amp-lib.c
+++ b/sound/soc/codecs/cs-amp-lib.c
@@ -182,6 +182,10 @@
for (i = 0; i < efi_data->count; ++i) {
u64 cal_target = cs_amp_cal_target_u64(&efi_data->data[i]);
+ /* Skip empty entries */
+ if (!efi_data->data[i].calTime[0] && !efi_data->data[i].calTime[1])
+ continue;
+
/* Skip entries with unpopulated silicon ID */
if (cal_target == 0)
continue;
@@ -193,7 +197,8 @@
}
}
- if (!cal && (amp_index >= 0) && (amp_index < efi_data->count)) {
+ if (!cal && (amp_index >= 0) && (amp_index < efi_data->count) &&
+ (efi_data->data[amp_index].calTime[0] || efi_data->data[amp_index].calTime[1])) {
u64 cal_target = cs_amp_cal_target_u64(&efi_data->data[amp_index]);
/*
diff --git a/sound/soc/codecs/lpass-macro-common.h b/sound/soc/codecs/lpass-macro-common.h
index 21cb30a..fb4b96c 100644
--- a/sound/soc/codecs/lpass-macro-common.h
+++ b/sound/soc/codecs/lpass-macro-common.h
@@ -49,6 +49,12 @@
static inline const char *lpass_macro_get_codec_version_string(int version)
{
switch (version) {
+ case LPASS_CODEC_VERSION_1_0:
+ return "v1.0";
+ case LPASS_CODEC_VERSION_1_1:
+ return "v1.1";
+ case LPASS_CODEC_VERSION_1_2:
+ return "v1.2";
case LPASS_CODEC_VERSION_2_0:
return "v2.0";
case LPASS_CODEC_VERSION_2_1:
diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
index a62ccd0..8454193 100644
--- a/sound/soc/codecs/lpass-va-macro.c
+++ b/sound/soc/codecs/lpass-va-macro.c
@@ -1485,6 +1485,10 @@
if ((core_id_0 == 0x02) && (core_id_1 == 0x0F) && (core_id_2 == 0x80 || core_id_2 == 0x81))
version = LPASS_CODEC_VERSION_2_8;
+ if (version == LPASS_CODEC_VERSION_UNKNOWN)
+ dev_warn(va->dev, "Unknown Codec version, ID: %02x / %02x / %02x\n",
+ core_id_0, core_id_1, core_id_2);
+
lpass_macro_set_codec_version(version);
dev_dbg(va->dev, "LPASS Codec Version %s\n", lpass_macro_get_codec_version_string(version));
diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c
index 13926f4..af296b7 100644
--- a/sound/soc/codecs/wcd937x.c
+++ b/sound/soc/codecs/wcd937x.c
@@ -242,10 +242,9 @@
static void wcd937x_reset(struct wcd937x_priv *wcd937x)
{
- usleep_range(20, 30);
-
gpiod_set_value(wcd937x->reset_gpio, 1);
-
+ usleep_range(20, 30);
+ gpiod_set_value(wcd937x->reset_gpio, 0);
usleep_range(20, 30);
}
diff --git a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
index ccb6c1f..73e5c63 100644
--- a/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
+++ b/sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
@@ -2748,6 +2748,7 @@
case AFE_ASRC12_NEW_CON9:
case AFE_LRCK_CNT:
case AFE_DAC_MON0:
+ case AFE_DAC_CON0:
case AFE_DL2_CUR:
case AFE_DL3_CUR:
case AFE_DL6_CUR:
diff --git a/sound/soc/sof/amd/acp-dsp-offset.h b/sound/soc/sof/amd/acp-dsp-offset.h
index 59afbe2..072b703 100644
--- a/sound/soc/sof/amd/acp-dsp-offset.h
+++ b/sound/soc/sof/amd/acp-dsp-offset.h
@@ -76,13 +76,15 @@
#define DSP_SW_INTR_CNTL_OFFSET 0x0
#define DSP_SW_INTR_STAT_OFFSET 0x4
#define DSP_SW_INTR_TRIG_OFFSET 0x8
-#define ACP_ERROR_STATUS 0x18C4
+#define ACP3X_ERROR_STATUS 0x18C4
+#define ACP6X_ERROR_STATUS 0x1A4C
#define ACP3X_AXI2DAGB_SEM_0 0x1880
#define ACP5X_AXI2DAGB_SEM_0 0x1884
#define ACP6X_AXI2DAGB_SEM_0 0x1874
/* ACP common registers to report errors related to I2S & SoundWire interfaces */
-#define ACP_SW0_I2S_ERROR_REASON 0x18B4
+#define ACP3X_SW_I2S_ERROR_REASON 0x18C8
+#define ACP6X_SW0_I2S_ERROR_REASON 0x18B4
#define ACP_SW1_I2S_ERROR_REASON 0x1A50
/* Registers from ACP_SHA block */
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index 74fd5f2..85b58c8 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -92,6 +92,7 @@
unsigned int idx, unsigned int dscr_count)
{
struct snd_sof_dev *sdev = adata->dev;
+ const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int val, status;
int ret;
@@ -102,7 +103,7 @@
val & (1 << ch), ACP_REG_POLL_INTERVAL,
ACP_REG_POLL_TIMEOUT_US);
if (ret < 0) {
- status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
+ status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->acp_error_stat);
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));
dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
@@ -263,6 +264,17 @@
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
+
+ /* psp_send_cmd only required for vangogh platform (rev - 5) */
+ if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
+ /* Modify IRAM and DRAM size */
+ ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
+ if (ret)
+ return ret;
+ ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
+ if (ret)
+ return ret;
+ }
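+ /* Send the fence commands before starting the SHA DMA transfer below. */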
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
@@ -280,17 +292,6 @@
return ret;
}
- /* psp_send_cmd only required for vangogh platform (rev - 5) */
- if (desc->rev == 5 && !(adata->quirks && adata->quirks->skip_iram_dram_size_mod)) {
- /* Modify IRAM and DRAM size */
- ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | IRAM_DRAM_FENCE_2);
- if (ret)
- return ret;
- ret = psp_send_cmd(adata, MBOX_ACP_IRAM_DRAM_FENCE_COMMAND | MBOX_ISREADY_FLAG);
- if (ret)
- return ret;
- }
-
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
@@ -402,9 +403,11 @@
if (val & ACP_ERROR_IRQ_MASK) {
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, ACP_ERROR_IRQ_MASK);
- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_SW0_I2S_ERROR_REASON, 0);
- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_SW1_I2S_ERROR_REASON, 0);
- snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + ACP_ERROR_STATUS, 0);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_sw0_i2s_err_reason, 0);
+ /* ACP_SW1_I2S_ERROR_REASON is a new register added from the rmb platform onwards */
+ if (desc->rev >= 6)
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SW1_I2S_ERROR_REASON, 0);
+ snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_error_stat, 0);
irq_flag = 1;
}
@@ -430,6 +433,7 @@
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int base = desc->pgfsm_base;
unsigned int val;
+ unsigned int acp_pgfsm_status_mask, acp_pgfsm_cntl_mask;
int ret;
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
@@ -437,9 +441,23 @@
if (val == ACP_POWERED_ON)
return 0;
- if (val & ACP_PGFSM_STATUS_MASK)
+ switch (desc->rev) {
+ case 3:
+ case 5:
+ acp_pgfsm_status_mask = ACP3X_PGFSM_STATUS_MASK;
+ acp_pgfsm_cntl_mask = ACP3X_PGFSM_CNTL_POWER_ON_MASK;
+ break;
+ case 6:
+ acp_pgfsm_status_mask = ACP6X_PGFSM_STATUS_MASK;
+ acp_pgfsm_cntl_mask = ACP6X_PGFSM_CNTL_POWER_ON_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (val & acp_pgfsm_status_mask)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
- ACP_PGFSM_CNTL_POWER_ON_MASK);
+ acp_pgfsm_cntl_mask);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
!val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
index 87e79d5..61b28df8 100644
--- a/sound/soc/sof/amd/acp.h
+++ b/sound/soc/sof/amd/acp.h
@@ -25,8 +25,11 @@
#define ACP_REG_POLL_TIMEOUT_US 2000
#define ACP_DMA_COMPLETE_TIMEOUT_US 5000
-#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
-#define ACP_PGFSM_STATUS_MASK 0x03
+#define ACP3X_PGFSM_CNTL_POWER_ON_MASK 0x01
+#define ACP3X_PGFSM_STATUS_MASK 0x03
+#define ACP6X_PGFSM_CNTL_POWER_ON_MASK 0x07
+#define ACP6X_PGFSM_STATUS_MASK 0x0F
+
#define ACP_POWERED_ON 0x00
#define ACP_ASSERT_RESET 0x01
#define ACP_RELEASE_RESET 0x00
@@ -203,6 +206,8 @@
u32 probe_reg_offset;
u32 reg_start_addr;
u32 reg_end_addr;
+ u32 acp_error_stat;
+ u32 acp_sw0_i2s_err_reason;
u32 sdw_max_link_count;
u64 sdw_acpi_dev_addr;
};
diff --git a/sound/soc/sof/amd/pci-acp63.c b/sound/soc/sof/amd/pci-acp63.c
index fc89844..986f592 100644
--- a/sound/soc/sof/amd/pci-acp63.c
+++ b/sound/soc/sof/amd/pci-acp63.c
@@ -35,6 +35,8 @@
.ext_intr_cntl = ACP6X_EXTERNAL_INTR_CNTL,
.ext_intr_stat = ACP6X_EXT_INTR_STAT,
.ext_intr_stat1 = ACP6X_EXT_INTR_STAT1,
+ .acp_error_stat = ACP6X_ERROR_STATUS,
+ .acp_sw0_i2s_err_reason = ACP6X_SW0_I2S_ERROR_REASON,
.dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
.sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
index 4bc3095..a366f90 100644
--- a/sound/soc/sof/amd/pci-rmb.c
+++ b/sound/soc/sof/amd/pci-rmb.c
@@ -33,6 +33,8 @@
.pgfsm_base = ACP6X_PGFSM_BASE,
.ext_intr_stat = ACP6X_EXT_INTR_STAT,
.dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
+ .acp_error_stat = ACP6X_ERROR_STATUS,
+ .acp_sw0_i2s_err_reason = ACP6X_SW0_I2S_ERROR_REASON,
.sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
.fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
index e08875b..2b7c534 100644
--- a/sound/soc/sof/amd/pci-rn.c
+++ b/sound/soc/sof/amd/pci-rn.c
@@ -33,6 +33,8 @@
.pgfsm_base = ACP3X_PGFSM_BASE,
.ext_intr_stat = ACP3X_EXT_INTR_STAT,
.dsp_intr_base = ACP3X_DSP_SW_INTR_BASE,
+ .acp_error_stat = ACP3X_ERROR_STATUS,
+ .acp_sw0_i2s_err_reason = ACP3X_SW_I2S_ERROR_REASON,
.sram_pte_offset = ACP3X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP3X_AXI2DAGB_SEM_0,
.acp_clkmux_sel = ACP3X_CLKMUX_SEL,
diff --git a/sound/soc/sof/mediatek/mt8195/mt8195.c b/sound/soc/sof/mediatek/mt8195/mt8195.c
index 1c6e035..82d221f 100644
--- a/sound/soc/sof/mediatek/mt8195/mt8195.c
+++ b/sound/soc/sof/mediatek/mt8195/mt8195.c
@@ -575,6 +575,9 @@
.compatible = "google,tomato",
.sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682.tplg"
}, {
+ .compatible = "google,dojo",
+ .sof_tplg_filename = "sof-mt8195-mt6359-max98390-rt5682.tplg"
+ }, {
.compatible = "mediatek,mt8195",
.sof_tplg_filename = "sof-mt8195.tplg"
}, {
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 6343f40..4927b9a 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -825,7 +825,7 @@
{
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
- .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
.dst_ioas_id = self->ioas_id,
.src_ioas_id = self->ioas_id,
.length = PAGE_SIZE,
@@ -1318,7 +1318,7 @@
{
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
- .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
.src_ioas_id = self->ioas_id,
.dst_iova = MOCK_APERTURE_START,
.length = MOCK_PAGE_SIZE,
@@ -1608,7 +1608,7 @@
};
struct iommu_ioas_copy copy_cmd = {
.size = sizeof(copy_cmd),
- .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+ .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
.dst_ioas_id = self->ioas_id,
.dst_iova = MOCK_APERTURE_START,
.length = BUFFER_SIZE,
diff --git a/tools/testing/selftests/livepatch/test-livepatch.sh b/tools/testing/selftests/livepatch/test-livepatch.sh
index 65c9c05..bd13257 100755
--- a/tools/testing/selftests/livepatch/test-livepatch.sh
+++ b/tools/testing/selftests/livepatch/test-livepatch.sh
@@ -139,11 +139,8 @@
grep 'live patched' /proc/cmdline > /dev/kmsg
grep 'live patched' /proc/meminfo > /dev/kmsg
-mods=(/sys/kernel/livepatch/*)
-nmods=${#mods[@]}
-if [ "$nmods" -ne 1 ]; then
- die "Expecting only one moduled listed, found $nmods"
-fi
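+# Wait until only one livepatch module remains listed under /sys/kernel/livepatch.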
+loop_until 'mods=(/sys/kernel/livepatch/*); nmods=${#mods[@]}; [[ "$nmods" -eq 1 ]]' ||
+ die "Expecting only one moduled listed, found $nmods"
# These modules were disabled by the atomic replace
for mod in $MOD_LIVEPATCH3 $MOD_LIVEPATCH2 $MOD_LIVEPATCH1; do
diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
index 648868f..c3554876 100755
--- a/tools/testing/selftests/net/forwarding/local_termination.sh
+++ b/tools/testing/selftests/net/forwarding/local_termination.sh
@@ -571,6 +571,10 @@
cleanup()
{
pre_cleanup
+
+ ip link set $h2 down
+ ip link set $h1 down
+
vrf_cleanup
}
diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh
index af3b398..9e677aa 100755
--- a/tools/testing/selftests/net/forwarding/no_forwarding.sh
+++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh
@@ -233,6 +233,9 @@
{
pre_cleanup
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
h2_destroy
h1_destroy
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 89e553e..a4762c4 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -420,12 +420,17 @@
fi
}
+start_events()
+{
+ mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
+ mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
+}
+
reset_with_events()
{
reset "${1}" || return 1
- mptcp_lib_events "${ns1}" "${evts_ns1}" evts_ns1_pid
- mptcp_lib_events "${ns2}" "${evts_ns2}" evts_ns2_pid
+ start_events
}
reset_with_tcp_filter()
@@ -1112,26 +1117,26 @@
print_check "sum"
count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
- if [ "$count" != "$csum_ns1" ]; then
+ if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then
extra_msg+=" ns1=$count"
fi
if [ -z "$count" ]; then
print_skip
elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
- { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
+ { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
fail_test "got $count data checksum error[s] expected $csum_ns1"
else
print_ok
fi
print_check "csum"
count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
- if [ "$count" != "$csum_ns2" ]; then
+ if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then
extra_msg+=" ns2=$count"
fi
if [ -z "$count" ]; then
print_skip
elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
- { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
+ { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
fail_test "got $count data checksum error[s] expected $csum_ns2"
else
print_ok
@@ -1169,13 +1174,13 @@
print_check "ftx"
count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
- if [ "$count" != "$fail_tx" ]; then
+ if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then
extra_msg+=",tx=$count"
fi
if [ -z "$count" ]; then
print_skip
elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
- { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
+ { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
fail_test "got $count MP_FAIL[s] TX expected $fail_tx"
else
print_ok
@@ -1183,13 +1188,13 @@
print_check "failrx"
count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
- if [ "$count" != "$fail_rx" ]; then
+ if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then
extra_msg+=",rx=$count"
fi
if [ -z "$count" ]; then
print_skip
elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
- { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
+ { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
fail_test "got $count MP_FAIL[s] RX expected $fail_rx"
else
print_ok
@@ -3333,6 +3338,36 @@
fi
}
+# $1: ns ; $2: event type ; $3: count
+chk_evt_nr()
+{
+ local ns=${1}
+ local evt_name="${2}"
+ local exp="${3}"
+
+ local evts="${evts_ns1}"
+ local evt="${!evt_name}"
+ local count
+
+ evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_
+ [ "${ns}" == "ns2" ] && evts="${evts_ns2}"
+
+ print_check "event ${ns} ${evt_name} (${exp})"
+
+ if [[ "${evt_name}" = "LISTENER_"* ]] &&
+ ! mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then
+ print_skip "event not supported"
+ return
+ fi
+
+ count=$(grep -cw "type:${evt}" "${evts}")
+ if [ "${count}" != "${exp}" ]; then
+ fail_test "got ${count} events, expected ${exp}"
+ else
+ print_ok
+ fi
+}
+
userspace_tests()
{
# userspace pm type prevents add_addr
@@ -3429,14 +3464,12 @@
"signal"
userspace_pm_chk_get_addr "${ns1}" "10" "id 10 flags signal 10.0.2.1"
userspace_pm_chk_get_addr "${ns1}" "20" "id 20 flags signal 10.0.3.1"
- userspace_pm_rm_addr $ns1 10
userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $MPTCP_LIB_EVENT_SUB_ESTABLISHED
userspace_pm_chk_dump_addr "${ns1}" \
- "id 20 flags signal 10.0.3.1" "after rm_addr 10"
+ "id 20 flags signal 10.0.3.1" "after rm_sf 10"
userspace_pm_rm_addr $ns1 20
- userspace_pm_rm_sf $ns1 10.0.3.1 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
userspace_pm_chk_dump_addr "${ns1}" "" "after rm_addr 20"
- chk_rm_nr 2 2 invert
+ chk_rm_nr 1 1 invert
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
kill_events_pids
@@ -3460,12 +3493,11 @@
"id 20 flags subflow 10.0.3.2" \
"subflow"
userspace_pm_chk_get_addr "${ns2}" "20" "id 20 flags subflow 10.0.3.2"
- userspace_pm_rm_addr $ns2 20
userspace_pm_rm_sf $ns2 10.0.3.2 $MPTCP_LIB_EVENT_SUB_ESTABLISHED
userspace_pm_chk_dump_addr "${ns2}" \
"" \
- "after rm_addr 20"
- chk_rm_nr 1 1
+ "after rm_sf 20"
+ chk_rm_nr 0 1
chk_mptcp_info subflows 0 subflows 0
chk_subflows_total 1 1
kill_events_pids
@@ -3575,27 +3607,29 @@
if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
- pm_nl_set_limits $ns1 0 2
- pm_nl_set_limits $ns2 0 2
+ start_events
+ pm_nl_set_limits $ns1 0 3
+ pm_nl_set_limits $ns2 0 3
+ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
- test_linkfail=4 speed=20 \
+ test_linkfail=4 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
wait_mpj $ns2
pm_nl_check_endpoint "creation" \
$ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
- chk_subflow_nr "before delete" 2
+ chk_subflow_nr "before delete id 2" 2
chk_mptcp_info subflows 1 subflows 1
pm_nl_del_endpoint $ns2 2 10.0.2.2
sleep 0.5
- chk_subflow_nr "after delete" 1
+ chk_subflow_nr "after delete id 2" 1
chk_mptcp_info subflows 0 subflows 0
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
wait_mpj $ns2
- chk_subflow_nr "after re-add" 2
+ chk_subflow_nr "after re-add id 2" 2
chk_mptcp_info subflows 1 subflows 1
pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
@@ -3610,21 +3644,51 @@
chk_subflow_nr "after no reject" 3
chk_mptcp_info subflows 2 subflows 2
+ local i
+ for i in $(seq 3); do
+ pm_nl_del_endpoint $ns2 1 10.0.1.2
+ sleep 0.5
+ chk_subflow_nr "after delete id 0 ($i)" 2
+ chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
+
+ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+ wait_mpj $ns2
+ chk_subflow_nr "after re-add id 0 ($i)" 3
+ chk_mptcp_info subflows 3 subflows 3
+ done
+
mptcp_lib_kill_wait $tests_pid
- chk_join_nr 3 3 3
- chk_rm_nr 1 1
+ kill_events_pids
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 4
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 4
+
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 0
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 0
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 6
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 5 # one has been closed before estab
+
+ chk_join_nr 6 6 6
+ chk_rm_nr 4 4
fi
# remove and re-add
- if reset "delete re-add signal" &&
+ if reset_with_events "delete re-add signal" &&
mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
- pm_nl_set_limits $ns1 0 2
- pm_nl_set_limits $ns2 2 2
+ pm_nl_set_limits $ns1 0 3
+ pm_nl_set_limits $ns2 3 3
pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
- test_linkfail=4 speed=20 \
+ pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
+ test_linkfail=4 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
@@ -3645,11 +3709,47 @@
wait_mpj $ns2
chk_subflow_nr "after re-add" 3
chk_mptcp_info subflows 2 subflows 2
+
+ pm_nl_del_endpoint $ns1 42 10.0.1.1
+ sleep 0.5
+ chk_subflow_nr "after delete ID 0" 2
+ chk_mptcp_info subflows 2 subflows 2
+
+ pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
+ wait_mpj $ns2
+ chk_subflow_nr "after re-add ID 0" 3
+ chk_mptcp_info subflows 3 subflows 3
+
+ pm_nl_del_endpoint $ns1 99 10.0.1.1
+ sleep 0.5
+ chk_subflow_nr "after re-delete ID 0" 2
+ chk_mptcp_info subflows 2 subflows 2
+
+ pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
+ wait_mpj $ns2
+ chk_subflow_nr "after re-re-add ID 0" 3
+ chk_mptcp_info subflows 3 subflows 3
mptcp_lib_kill_wait $tests_pid
- chk_join_nr 3 3 3
- chk_add_nr 4 4
- chk_rm_nr 2 1 invert
+ kill_events_pids
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_LISTENER_CREATED 1
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_CREATED 1
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_ESTABLISHED 1
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_ANNOUNCED 0
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_REMOVED 0
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
+ chk_evt_nr ns1 MPTCP_LIB_EVENT_SUB_CLOSED 3
+
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_CREATED 1
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_ESTABLISHED 1
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_ANNOUNCED 6
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_REMOVED 4
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_ESTABLISHED 5
+ chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 3
+
+ chk_join_nr 5 5 5
+ chk_add_nr 6 6
+ chk_rm_nr 4 3 invert
fi
# flush and re-add
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
index 438280e..4578a33 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
@@ -12,10 +12,14 @@
readonly KSFT_TEST="${MPTCP_LIB_KSFT_TEST:-$(basename "${0}" .sh)}"
# These variables are used in some selftests, read-only
+declare -rx MPTCP_LIB_EVENT_CREATED=1 # MPTCP_EVENT_CREATED
+declare -rx MPTCP_LIB_EVENT_ESTABLISHED=2 # MPTCP_EVENT_ESTABLISHED
+declare -rx MPTCP_LIB_EVENT_CLOSED=3 # MPTCP_EVENT_CLOSED
declare -rx MPTCP_LIB_EVENT_ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
declare -rx MPTCP_LIB_EVENT_REMOVED=7 # MPTCP_EVENT_REMOVED
declare -rx MPTCP_LIB_EVENT_SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
declare -rx MPTCP_LIB_EVENT_SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
+declare -rx MPTCP_LIB_EVENT_SUB_PRIORITY=13 # MPTCP_EVENT_SUB_PRIORITY
declare -rx MPTCP_LIB_EVENT_LISTENER_CREATED=15 # MPTCP_EVENT_LISTENER_CREATED
declare -rx MPTCP_LIB_EVENT_LISTENER_CLOSED=16 # MPTCP_EVENT_LISTENER_CLOSED