diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 33b04db8408f9..fda97b3fcf018 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -52,6 +52,8 @@ stable kernels.
| Allwinner      | A64/R18         | UNKNOWN1        | SUN50I_ERRATUM_UNKNOWN1     |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2457168        | ARM64_ERRATUM_2457168       |
++----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2064142        | ARM64_ERRATUM_2064142       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2038923        | ARM64_ERRATUM_2038923       |
diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst
index 78ca69eda8778..02f4ad314a1eb 100644
--- a/Documentation/hwmon/asus_ec_sensors.rst
+++ b/Documentation/hwmon/asus_ec_sensors.rst
@@ -13,12 +13,16 @@ Supported boards:
* ROG CROSSHAIR VIII FORMULA
* ROG CROSSHAIR VIII HERO
* ROG CROSSHAIR VIII IMPACT
+ * ROG MAXIMUS XI HERO
+ * ROG MAXIMUS XI HERO (WI-FI)
* ROG STRIX B550-E GAMING
* ROG STRIX B550-I GAMING
* ROG STRIX X570-E GAMING
* ROG STRIX X570-E GAMING WIFI II
* ROG STRIX X570-F GAMING
* ROG STRIX X570-I GAMING
+ * ROG STRIX Z690-A GAMING WIFI D4
+ * ROG ZENITH II EXTREME

Authors:
- Eugene Shalygin <eugene.shalygin@gmail.com>
diff --git a/Makefile b/Makefile
index e361c6230e9e5..1f27c4bd09e67 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 19
-SUBLEVEL = 8
+SUBLEVEL = 9
EXTRAVERSION =
NAME = Superb Owl

@@ -1286,8 +1286,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj

PHONY += headers
headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
- $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
- $(error Headers not exportable for the $(SRCARCH) architecture))
+ $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML))
$(Q)$(MAKE) $(hdr-inst)=include/uapi
$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi

diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
index ba621783acdbc..d6f364c6be94b 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
@@ -76,8 +76,8 @@
regulators {
vdd_3v3: VDD_IO {
regulator-name = "VDD_IO";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -95,8 +95,8 @@

vddio_ddr: VDD_DDR {
regulator-name = "VDD_DDR";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -118,8 +118,8 @@

vdd_core: VDD_CORE {
regulator-name = "VDD_CORE";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -160,8 +160,8 @@

LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;

regulator-state-standby {
@@ -175,9 +175,8 @@

LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
- regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;

regulator-state-standby {
regulator-on-in-suspend;
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 164201a8fbf2d..492456e195a37 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -197,8 +197,8 @@
regulators {
vdd_io_reg: VDD_IO {
regulator-name = "VDD_IO";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -216,8 +216,8 @@

VDD_DDR {
regulator-name = "VDD_DDR";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -235,8 +235,8 @@

VDD_CORE {
regulator-name = "VDD_CORE";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -258,7 +258,6 @@
regulator-max-microvolt = <1850000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
- regulator-always-on;

regulator-state-standby {
regulator-on-in-suspend;
@@ -273,8 +272,8 @@

LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
regulator-always-on;

regulator-state-standby {
@@ -288,8 +287,8 @@

LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;

regulator-state-standby {
diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
index 103544620fd7c..b261b4da08502 100644
--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
@@ -244,8 +244,8 @@
regulators {
vdd_3v3: VDD_IO {
regulator-name = "VDD_IO";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -264,8 +264,8 @@

vddioddr: VDD_DDR {
regulator-name = "VDD_DDR";
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1450000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -285,8 +285,8 @@

vddcore: VDD_CORE {
regulator-name = "VDD_CORE";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -306,7 +306,7 @@
vddcpu: VDD_OTHER {
regulator-name = "VDD_OTHER";
regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1850000>;
+ regulator-max-microvolt = <1250000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-ramp-delay = <3125>;
@@ -326,8 +326,8 @@

vldo1: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-always-on;

regulator-state-standby {
diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
index 095c9143d99a3..6b791d515e294 100644
--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
@@ -51,16 +51,6 @@
vin-supply = <&reg_3p3v_s5>;
};

- reg_3p3v_s0: regulator-3p3v-s0 {
- compatible = "regulator-fixed";
- regulator-name = "V_3V3_S0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&reg_3p3v_s5>;
- };
-
reg_3p3v_s5: regulator-3p3v-s5 {
compatible = "regulator-fixed";
regulator-name = "V_3V3_S5";
@@ -259,7 +249,7 @@

/* default boot source: workaround #1 for errata ERR006282 */
smarc_flash: flash@0 {
- compatible = "winbond,w25q16dw", "jedec,spi-nor";
+ compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
index a1676b5d2980f..c5a98b0110dd3 100644
--- a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
@@ -28,7 +28,7 @@
enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
};

- backlight_led: backlight_led {
+ backlight_led: backlight-led {
compatible = "pwm-backlight";
pwms = <&pwm3 0 5000000 0>;
brightness-levels = <0 16 64 255>;
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index df6d673e83d56..f4501dea98b04 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -541,9 +541,41 @@ extern u32 at91_pm_suspend_in_sram_sz;

static int at91_suspend_finish(unsigned long val)
{
+ unsigned char modified_gray_code[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
+ 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
+ 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
+ 0x10, 0x11,
+ };
+ unsigned int tmp, index;
int i;

if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
+ /*
+ * Bootloader will perform DDR recalibration and will try to
+ * restore the ZQ0SR0 with the value saved here. But the
+ * calibration is buggy and restoring some values from ZQ0SR0
+ * is forbidden and risky thus we need to provide processed
+ * values for these (modified gray code values).
+ */
+ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
+
+ /* Store pull-down output impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
+
+ /* Store pull-up output impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+
+ /* Store pull-down on-die termination impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+
+ /* Store pull-up on-die termination impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+
/*
* The 1st 8 words of memory might get corrupted in the process
* of DDR PHY recalibration; it is saved here in securam and it
@@ -1066,10 +1098,6 @@ static int __init at91_pm_backup_init(void)
of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
if (!located)
goto securam_fail;
-
- /* DDR3PHY_ZQ0SR0 */
- soc_pm.bu->ddr_phy_calibration[0] = readl(soc_pm.data.ramc_phy +
- 0x188);
}

return 0;
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index abe4ced33edaf..ffed4d9490428 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -172,9 +172,15 @@ sr_ena_2:
/* Put DDR PHY's DLL in bypass mode for non-backup modes. */
cmp r7, #AT91_PM_BACKUP
beq sr_ena_3
- ldr tmp1, [r3, #DDR3PHY_PIR]
- orr tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
- str tmp1, [r3, #DDR3PHY_PIR]
+
+ /* Disable DX DLLs. */
+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
+
+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX1DLLCR]

sr_ena_3:
/* Power down DDR PHY data receivers. */
@@ -221,10 +227,14 @@ sr_ena_3:
bic tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0
str tmp1, [r3, #DDR3PHY_DSGCR]

- /* Take DDR PHY's DLL out of bypass mode. */
- ldr tmp1, [r3, #DDR3PHY_PIR]
- bic tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
- str tmp1, [r3, #DDR3PHY_PIR]
+ /* Enable DX DLLs. */
+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
+
+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX1DLLCR]

/* Enable quasi-dynamic programming. */
mov tmp1, #0
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 001eaba5a6b4b..cc1e7bb49d38b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -914,6 +914,23 @@ config ARM64_ERRATUM_1902691

If unsure, say Y.

+config ARM64_ERRATUM_2457168
+ bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
+ depends on ARM64_AMU_EXTN
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2457168.
+
+ The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate
+ as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments
+ incorrectly giving a significantly higher output value.
+
+ Work around this problem by returning 0 when reading the affected counter in
+ key locations that results in disabling all users of this counter. This effect
+ is the same to firmware disabling affected counters.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1867,6 +1884,8 @@ config ARM64_BTI_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
+ depends on !CC_IS_GCC
# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
index 40d34c8384a5e..b949cac037427 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
@@ -25,7 +25,6 @@
&enetc_port0 {
phy-handle = <&slot1_sgmii>;
phy-mode = "2500base-x";
- managed = "in-band-status";
status = "okay";
};

diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 24737e89038a4..96cac0f969a77 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -626,24 +626,28 @@
lan1: port@0 {
reg = <0>;
label = "lan1";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};

lan2: port@1 {
reg = <1>;
label = "lan2";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};

lan3: port@2 {
reg = <2>;
label = "lan3";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};

lan4: port@3 {
reg = <3>;
label = "lan4";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};

diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index eafa88d980b32..c2d4da25482ff 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -32,10 +32,10 @@
};

/* Fixed clock dedicated to SPI CAN controller */
- clk20m: oscillator {
+ clk40m: oscillator {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <20000000>;
+ clock-frequency = <40000000>;
};

gpio-keys {
@@ -194,8 +194,8 @@

can1: can@0 {
compatible = "microchip,mcp251xfd";
- clocks = <&clk20m>;
- interrupts-extended = <&gpio1 6 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&clk40m>;
+ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_can1_int>;
reg = <0>;
@@ -595,7 +595,7 @@
pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>;
reg = <0x4a>;
/* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */
- reset-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
status = "disabled";
};

@@ -737,6 +737,7 @@
};

&usbphynop2 {
+ power-domains = <&pgc_otg2>;
vcc-supply = <&reg_vdd_3v3>;
};

diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index 521215520a0f4..6630ec561dc25 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -770,10 +770,10 @@

pinctrl_sai2: sai2grp {
fsl,pins = <
- MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC
- MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00
- MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK
- MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK
+ MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC 0xd6
+ MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 0xd6
+ MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK 0xd6
+ MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK 0xd6
>;
};

diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index fb17e329cd370..f5323291a9b24 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -620,7 +620,7 @@
interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
reg = <0x4a>;
/* Verdin GPIO_2 (SODIMM 208) */
- reset-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
status = "disabled";
};
};
@@ -697,7 +697,7 @@
pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>;
reg = <0x4a>;
/* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */
- reset-gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
status = "disabled";
};

diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
index 899e8e7dbc24f..802ad6e5cef61 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
@@ -204,7 +204,6 @@
reg = <0x51>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rtc>;
- interrupt-names = "irq";
interrupt-parent = <&gpio1>;
interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
quartz-load-femtofarads = <7000>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 7cbb0de060ddc..1c15726cff8bf 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -85,7 +85,7 @@
"renesas,rcar-gen4-hscif",
"renesas,hscif";
reg = <0 0xe6540000 0 96>;
- interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 514>,
<&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>,
<&scif_clk>;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 5f4117dae8888..af137f91607da 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -656,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2457168
+ {
+ .desc = "ARM erratum 2457168",
+ .capability = ARM64_WORKAROUND_2457168,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+
+ /* Cortex-A510 r0p0-r1p1 */
+ CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
{
.desc = "ARM erratum 2038923",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ebdfbd1cf207b..f34c9f8b9ee0a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1798,7 +1798,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
smp_processor_id());
cpumask_set_cpu(smp_processor_id(), &amu_cpus);
- update_freq_counters_refs();
+
+ /* 0 reference values signal broken/disabled counters */
+ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
+ update_freq_counters_refs();
}
}

diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index af5df48ba915b..2e248342476ea 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -300,6 +300,11 @@ static void swsusp_mte_restore_tags(void)
unsigned long pfn = xa_state.xa_index;
struct page *page = pfn_to_online_page(pfn);

+ /*
+ * It is not required to invoke page_kasan_tag_reset(page)
+ * at this point since the tags stored in page->flags are
+ * already restored.
+ */
mte_restore_page_tags(page_address(page), tags);

mte_free_tag_storage(tags);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index b2b730233274b..f6b00743c3994 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -48,6 +48,15 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (!pte_is_tagged)
return;

+ page_kasan_tag_reset(page);
+ /*
+ * We need smp_wmb() in between setting the flags and clearing the
+ * tags because if another thread reads page->flags and builds a
+ * tagged address out of it, there is an actual dependency to the
+ * memory access, but on the current thread we do not guarantee that
+ * the new page->flags are visible before the tags were updated.
+ */
+ smp_wmb();
mte_clear_page_tags(page_address(page));
}

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 9ab78ad826e2a..707b5451929d4 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -310,12 +310,25 @@ core_initcall(init_amu_fie);

static void cpu_read_corecnt(void *val)
{
+ /*
+ * A value of 0 can be returned if the current CPU does not support AMUs
+ * or if the counter is disabled for this CPU. A return value of 0 at
+ * counter read is properly handled as an error case by the users of the
+ * counter.
+ */
*(u64 *)val = read_corecnt();
}

static void cpu_read_constcnt(void *val)
{
- *(u64 *)val = read_constcnt();
+ /*
+ * Return 0 if the current CPU is affected by erratum 2457168. A value
+ * of 0 is also returned if the current CPU does not support AMUs or if
+ * the counter is disabled. A return value of 0 at counter read is
+ * properly handled as an error case by the users of the counter.
+ */
+ *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ?
+ 0UL : read_constcnt();
}

static inline
@@ -342,7 +355,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
*/
bool cpc_ffh_supported(void)
{
- return freq_counters_valid(get_cpu_with_amu_feat());
+ int cpu = get_cpu_with_amu_feat();
+
+ /*
+ * FFH is considered supported if there is at least one present CPU that
+ * supports AMUs. Using FFH to read core and reference counters for CPUs
+ * that do not support AMUs, have counters disabled or that are affected
+ * by errata, will result in a return value of 0.
+ *
+ * This is done to allow any enabled and valid counters to be read
+ * through FFH, knowing that potentially returning 0 as counter value is
+ * properly handled by the users of these counters.
+ */
+ if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
+ return false;
+
+ return true;
}

int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
|
||
|
index 24913271e898c..0dea80bf6de46 100644
|
||
|
--- a/arch/arm64/mm/copypage.c
|
||
|
+++ b/arch/arm64/mm/copypage.c
|
||
|
@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from)
|
||
|
|
||
|
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
|
||
|
set_bit(PG_mte_tagged, &to->flags);
|
||
|
+ page_kasan_tag_reset(to);
|
||
|
+ /*
|
||
|
+ * We need smp_wmb() in between setting the flags and clearing the
|
||
|
+ * tags because if another thread reads page->flags and builds a
|
||
|
+ * tagged address out of it, there is an actual dependency to the
|
||
|
+ * memory access, but on the current thread we do not guarantee that
|
||
|
+ * the new page->flags are visible before the tags were updated.
|
||
|
+ */
|
||
|
+ smp_wmb();
|
||
|
mte_copy_page_tags(kto, kfrom);
|
||
|
}
|
||
|
}
|
||
|
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
|
||
|
index 4334dec93bd44..a9e50e930484a 100644
|
||
|
--- a/arch/arm64/mm/mteswap.c
|
||
|
+++ b/arch/arm64/mm/mteswap.c
|
||
|
@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
|
||
|
if (!tags)
|
||
|
return false;
|
||
|
|
||
|
+ page_kasan_tag_reset(page);
|
||
|
+ /*
|
||
|
+ * We need smp_wmb() in between setting the flags and clearing the
|
||
|
+ * tags because if another thread reads page->flags and builds a
|
||
|
+ * tagged address out of it, there is an actual dependency to the
|
||
|
+ * memory access, but on the current thread we do not guarantee that
|
||
|
+ * the new page->flags are visible before the tags were updated.
|
||
|
+ */
|
||
|
+ smp_wmb();
|
||
|
mte_restore_page_tags(page_address(page), tags);
|
||
|
|
||
|
return true;
|
||
|
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
|
||
|
index 8809e14cf86a2..18999f46df19f 100644
|
||
|
--- a/arch/arm64/tools/cpucaps
|
||
|
+++ b/arch/arm64/tools/cpucaps
|
||
|
@@ -66,6 +66,7 @@ WORKAROUND_1902691
|
||
|
WORKAROUND_2038923
|
||
|
WORKAROUND_2064142
|
||
|
WORKAROUND_2077057
|
||
|
+WORKAROUND_2457168
|
||
|
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
|
||
|
WORKAROUND_TSB_FLUSH_FAILURE
|
||
|
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
|
||
|
diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
|
||
|
index e9de6da0ce51f..9dcfe9de55b0a 100644
|
||
|
--- a/arch/mips/loongson32/ls1c/board.c
|
||
|
+++ b/arch/mips/loongson32/ls1c/board.c
|
||
|
@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
|
||
|
static int __init ls1c_platform_init(void)
|
||
|
{
|
||
|
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
|
||
|
- ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
|
||
|
|
||
|
return platform_add_devices(ls1c_platform_devices,
|
||
|
ARRAY_SIZE(ls1c_platform_devices));
|
||
|
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
|
||
|
index 56ffd260c669b..0ec9cfc5131fc 100644
|
||
|
--- a/arch/parisc/include/asm/bitops.h
|
||
|
+++ b/arch/parisc/include/asm/bitops.h
|
||
|
@@ -12,14 +12,6 @@
|
||
|
#include <asm/barrier.h>
|
||
|
#include <linux/atomic.h>
|
||
|
|
||
|
-/* compiler build environment sanity checks: */
|
||
|
-#if !defined(CONFIG_64BIT) && defined(__LP64__)
|
||
|
-#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
|
||
|
-#endif
|
||
|
-#if defined(CONFIG_64BIT) && !defined(__LP64__)
|
||
|
-#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
|
||
|
-#endif
|
||
|
-
|
||
|
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
|
||
|
* on use of volatile and __*_bit() (set/clear/change):
|
||
|
* *_bit() want use of volatile.
|
||
|
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
|
||
|
index e0a9e96576221..fd15fd4bbb61b 100644
|
||
|
--- a/arch/parisc/kernel/head.S
|
||
|
+++ b/arch/parisc/kernel/head.S
|
||
|
@@ -22,7 +22,7 @@
|
||
|
#include <linux/init.h>
|
||
|
#include <linux/pgtable.h>
|
||
|
|
||
|
- .level PA_ASM_LEVEL
|
||
|
+ .level 1.1
|
||
|
|
||
|
__INITDATA
|
||
|
ENTRY(boot_args)
|
||
|
@@ -70,6 +70,47 @@ $bss_loop:
|
||
|
stw,ma %arg2,4(%r1)
|
||
|
stw,ma %arg3,4(%r1)
|
||
|
|
||
|
+#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
|
||
|
+ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
|
||
|
+ * and halt kernel if we detect a PA1.x CPU. */
|
||
|
+ ldi 32,%r10
|
||
|
+ mtctl %r10,%cr11
|
||
|
+ .level 2.0
|
||
|
+ mfctl,w %cr11,%r10
|
||
|
+ .level 1.1
|
||
|
+ comib,<>,n 0,%r10,$cpu_ok
|
||
|
+
|
||
|
+ load32 PA(msg1),%arg0
|
||
|
+ ldi msg1_end-msg1,%arg1
|
||
|
+$iodc_panic:
|
||
|
+ copy %arg0, %r10
|
||
|
+ copy %arg1, %r11
|
||
|
+ load32 PA(init_stack),%sp
|
||
|
+#define MEM_CONS 0x3A0
|
||
|
+ ldw MEM_CONS+32(%r0),%arg0 // HPA
|
||
|
+ ldi ENTRY_IO_COUT,%arg1
|
||
|
+ ldw MEM_CONS+36(%r0),%arg2 // SPA
|
||
|
+ ldw MEM_CONS+8(%r0),%arg3 // layers
|
||
|
+ load32 PA(__bss_start),%r1
|
||
|
+ stw %r1,-52(%sp) // arg4
|
||
|
+ stw %r0,-56(%sp) // arg5
|
||
|
+ stw %r10,-60(%sp) // arg6 = ptr to text
|
||
|
+ stw %r11,-64(%sp) // arg7 = len
|
||
|
+ stw %r0,-68(%sp) // arg8
|
||
|
+ load32 PA(.iodc_panic_ret), %rp
|
||
|
+ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
|
||
|
+ bv,n (%r1)
|
||
|
+.iodc_panic_ret:
|
||
|
+ b . /* wait endless with ... */
|
||
|
+ or %r10,%r10,%r10 /* qemu idle sleep */
|
||
|
+msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
|
||
|
+msg1_end:
|
||
|
+
|
||
|
+$cpu_ok:
|
||
|
+#endif
|
||
|
+
|
||
|
+ .level PA_ASM_LEVEL
|
||
|
+
|
||
|
/* Initialize startup VM. Just map first 16/32 MB of memory */
|
||
|
load32 PA(swapper_pg_dir),%r4
|
||
|
mtctl %r4,%cr24 /* Initialize kernel root pointer */
|
||
|
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
|
||
|
index 9f5bce1488d93..9bf37ef379509 100644
|
||
|
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
|
||
|
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
|
||
|
@@ -161,7 +161,7 @@
|
||
|
ranges;
|
||
|
|
||
|
cctrllr: cache-controller@2010000 {
|
||
|
- compatible = "sifive,fu540-c000-ccache", "cache";
|
||
|
+ compatible = "microchip,mpfs-ccache", "sifive,fu540-c000-ccache", "cache";
|
||
|
reg = <0x0 0x2010000 0x0 0x1000>;
|
||
|
cache-block-size = <64>;
|
||
|
cache-level = <2>;
|
||
|
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
|
||
|
index 53ed3884fe644..5d66e3947070c 100644
|
||
|
--- a/arch/s390/kernel/nmi.c
|
||
|
+++ b/arch/s390/kernel/nmi.c
|
||
|
@@ -63,7 +63,7 @@ static inline unsigned long nmi_get_mcesa_size(void)
|
||
|
* structure. The structure is required for machine check happening
|
||
|
* early in the boot process.
|
||
|
*/
|
||
|
-static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
|
||
|
+static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
|
||
|
|
||
|
void __init nmi_alloc_mcesa_early(u64 *mcesad)
|
||
|
{
|
||
|
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
|
||
|
index 0a37f5de28631..3e0361db963ef 100644
|
||
|
--- a/arch/s390/kernel/setup.c
|
||
|
+++ b/arch/s390/kernel/setup.c
|
||
|
@@ -486,6 +486,7 @@ static void __init setup_lowcore_dat_off(void)
|
||
|
put_abs_lowcore(restart_data, lc->restart_data);
|
||
|
put_abs_lowcore(restart_source, lc->restart_source);
|
||
|
put_abs_lowcore(restart_psw, lc->restart_psw);
|
||
|
+ put_abs_lowcore(mcesad, lc->mcesad);
|
||
|
|
||
|
lc->spinlock_lockval = arch_spin_lockval(0);
|
||
|
lc->spinlock_index = 0;
|
||
|
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
|
||
|
index 4a23e52fe0ee1..ebc271bb6d8ed 100644
|
||
|
--- a/arch/x86/include/asm/sev.h
|
||
|
+++ b/arch/x86/include/asm/sev.h
|
||
|
@@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
|
||
|
void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
|
||
|
void snp_set_wakeup_secondary_cpu(void);
|
||
|
bool snp_init(struct boot_params *bp);
|
||
|
-void snp_abort(void);
|
||
|
+void __init __noreturn snp_abort(void);
|
||
|
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
|
||
|
#else
|
||
|
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
|
||
|
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
|
||
|
index 4f84c3f11af5b..a428c62330d37 100644
|
||
|
--- a/arch/x86/kernel/sev.c
|
||
|
+++ b/arch/x86/kernel/sev.c
|
||
|
@@ -2112,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp)
|
||
|
return true;
|
||
|
}
|
||
|
|
||
|
-void __init snp_abort(void)
|
||
|
+void __init __noreturn snp_abort(void)
|
||
|
{
|
||
|
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
|
||
|
}
|
||
|
diff --git a/block/partitions/core.c b/block/partitions/core.c
|
||
|
index 8a0ec929023bc..76617b1d2d47f 100644
|
||
|
--- a/block/partitions/core.c
|
||
|
+++ b/block/partitions/core.c
|
||
|
@@ -597,6 +597,9 @@ static int blk_add_partitions(struct gendisk *disk)
|
||
|
if (disk->flags & GENHD_FL_NO_PART)
|
||
|
return 0;
|
||
|
|
||
|
+ if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
|
||
|
+ return 0;
|
||
|
+
|
||
|
state = check_partition(disk);
|
||
|
if (!state)
|
||
|
return 0;
|
||
|
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
|
||
|
index 15a75afe6b845..676b6275d5b53 100644
|
||
|
--- a/drivers/base/driver.c
|
||
|
+++ b/drivers/base/driver.c
|
||
|
@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override,
|
||
|
if (len >= (PAGE_SIZE - 1))
|
||
|
return -EINVAL;
|
||
|
|
||
|
+ /*
|
||
|
+ * Compute the real length of the string in case userspace sends us a
|
||
|
+ * bunch of \0 characters like python likes to do.
|
||
|
+ */
|
||
|
+ len = strlen(s);
|
||
|
+
|
||
|
if (!len) {
|
||
|
/* Empty string passed - clear override */
|
||
|
device_lock(dev);
|
||
|
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
|
||
|
index 719323bc6c7f1..37ab23a9d0345 100644
|
||
|
--- a/drivers/base/regmap/regmap-spi.c
|
||
|
+++ b/drivers/base/regmap/regmap-spi.c
|
||
|
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
|
||
|
const struct regmap_config *config)
|
||
|
{
|
||
|
size_t max_size = spi_max_transfer_size(spi);
|
||
|
+ size_t max_msg_size, reg_reserve_size;
|
||
|
struct regmap_bus *bus;
|
||
|
|
||
|
if (max_size != SIZE_MAX) {
|
||
|
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
|
||
|
if (!bus)
|
||
|
return ERR_PTR(-ENOMEM);
|
||
|
|
||
|
+ max_msg_size = spi_max_message_size(spi);
|
||
|
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
|
||
|
+ + config->pad_bits / BITS_PER_BYTE;
|
||
|
+ if (max_size + reg_reserve_size > max_msg_size)
|
||
|
+ max_size -= reg_reserve_size;
|
||
|
+
|
||
|
bus->free_on_exit = true;
|
||
|
bus->max_raw_read = max_size;
|
||
|
bus->max_raw_write = max_size;
|
||
|
+
|
||
|
return bus;
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
|
||
|
index 2cad427741647..f9fd1b6c15d42 100644
|
||
|
--- a/drivers/cpufreq/cpufreq.c
|
||
|
+++ b/drivers/cpufreq/cpufreq.c
|
||
|
@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
|
||
|
|
||
|
target_freq = clamp_val(target_freq, policy->min, policy->max);
|
||
|
|
||
|
- if (!cpufreq_driver->target_index)
|
||
|
+ if (!policy->freq_table)
|
||
|
return target_freq;
|
||
|
|
||
|
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
|
||
|
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
|
||
|
index 4dde8edd53b62..3e8d4b51a8140 100644
|
||
|
--- a/drivers/firmware/efi/capsule-loader.c
|
||
|
+++ b/drivers/firmware/efi/capsule-loader.c
|
||
|
@@ -242,29 +242,6 @@ failed:
|
||
|
return ret;
|
||
|
}
|
||
|
|
||
|
-/**
|
||
|
- * efi_capsule_flush - called by file close or file flush
|
||
|
- * @file: file pointer
|
||
|
- * @id: not used
|
||
|
- *
|
||
|
- * If a capsule is being partially uploaded then calling this function
|
||
|
- * will be treated as upload termination and will free those completed
|
||
|
- * buffer pages and -ECANCELED will be returned.
|
||
|
- **/
|
||
|
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
|
||
|
-{
|
||
|
- int ret = 0;
|
||
|
- struct capsule_info *cap_info = file->private_data;
|
||
|
-
|
||
|
- if (cap_info->index > 0) {
|
||
|
- pr_err("capsule upload not complete\n");
|
||
|
- efi_free_all_buff_pages(cap_info);
|
||
|
- ret = -ECANCELED;
|
||
|
- }
|
||
|
-
|
||
|
- return ret;
|
||
|
-}
|
||
|
-
|
||
|
/**
|
||
|
* efi_capsule_release - called by file close
|
||
|
* @inode: not used
|
||
|
@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
|
||
|
{
|
||
|
struct capsule_info *cap_info = file->private_data;
|
||
|
|
||
|
+ if (cap_info->index > 0 &&
|
||
|
+ (cap_info->header.headersize == 0 ||
|
||
|
+ cap_info->count < cap_info->total_size)) {
|
||
|
+ pr_err("capsule upload not complete\n");
|
||
|
+ efi_free_all_buff_pages(cap_info);
|
||
|
+ }
|
||
|
+
|
||
|
kfree(cap_info->pages);
|
||
|
kfree(cap_info->phys);
|
||
|
kfree(file->private_data);
|
||
|
@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
|
||
|
.owner = THIS_MODULE,
|
||
|
.open = efi_capsule_open,
|
||
|
.write = efi_capsule_write,
|
||
|
- .flush = efi_capsule_flush,
|
||
|
.release = efi_capsule_release,
|
||
|
.llseek = no_llseek,
|
||
|
};
|
||
|
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
|
||
|
index d0537573501e9..2c67f71f23753 100644
|
||
|
--- a/drivers/firmware/efi/libstub/Makefile
|
||
|
+++ b/drivers/firmware/efi/libstub/Makefile
|
||
|
@@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
|
||
|
$(call cc-option,-fno-addrsig) \
|
||
|
-D__DISABLE_EXPORTS
|
||
|
|
||
|
+#
|
||
|
+# struct randomization only makes sense for Linux internal types, which the EFI
|
||
|
+# stub code never touches, so let's turn off struct randomization for the stub
|
||
|
+# altogether
|
||
|
+#
|
||
|
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
|
||
|
+
|
||
|
# remove SCS flags from all objects in this directory
|
||
|
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
|
||
|
# disable LTO
|
||
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
||
|
index 3adebb63680e0..67d4a3c13ed19 100644
|
||
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
||
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
|
||
|
@@ -2482,12 +2482,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
|
||
|
if (!hive->reset_domain ||
|
||
|
!amdgpu_reset_get_reset_domain(hive->reset_domain)) {
|
||
|
r = -ENOENT;
|
||
|
+ amdgpu_put_xgmi_hive(hive);
|
||
|
goto init_failed;
|
||
|
}
|
||
|
|
||
|
/* Drop the early temporary reset domain we created for device */
|
||
|
amdgpu_reset_put_reset_domain(adev->reset_domain);
|
||
|
adev->reset_domain = hive->reset_domain;
|
||
|
+ amdgpu_put_xgmi_hive(hive);
|
||
|
}
|
||
|
}
|
||
|
|
||
|
@@ -4473,8 +4475,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
||
|
retry:
|
||
|
amdgpu_amdkfd_pre_reset(adev);
|
||
|
|
||
|
- amdgpu_amdkfd_pre_reset(adev);
|
||
|
-
|
||
|
if (from_hypervisor)
|
||
|
r = amdgpu_virt_request_full_gpu(adev, true);
|
||
|
else
|
||
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
|
||
|
index e9411c28d88ba..2b00f8fe15a89 100644
|
||
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
|
||
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
|
||
|
@@ -2612,6 +2612,9 @@ static int psp_hw_fini(void *handle)
|
||
|
psp_rap_terminate(psp);
|
||
|
psp_dtm_terminate(psp);
|
||
|
psp_hdcp_terminate(psp);
|
||
|
+
|
||
|
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
|
||
|
+ psp_xgmi_terminate(psp);
|
||
|
}
|
||
|
|
||
|
psp_asd_terminate(psp);
|
||
|
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
|
||
|
index 1b108d03e7859..f2aebbf3fbe38 100644
|
||
|
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
|
||
|
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
|
||
|
@@ -742,7 +742,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
|
||
|
amdgpu_put_xgmi_hive(hive);
|
||
|
}
|
||
|
|
||
|
- return psp_xgmi_terminate(&adev->psp);
|
||
|
+ return 0;
|
||
|
}
|
||
|
|
||
|
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
|
||
|
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
|
||
|
index a4a6751b1e449..30998ac47707c 100644
|
||
|
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
|
||
|
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
|
||
|
@@ -5090,9 +5090,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
|
||
|
data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
|
||
|
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
|
||
|
|
||
|
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
|
||
|
- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
|
||
|
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
|
||
|
+ /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
|
||
|
+ if (adev->sdma.num_instances > 1) {
|
||
|
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
|
||
|
+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
|
||
|
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
|
||
|
+ }
|
||
|
} else {
|
||
|
/* Program RLC_CGCG_CGLS_CTRL */
|
||
|
def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
|
||
|
@@ -5121,9 +5124,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
|
||
|
data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
|
||
|
WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
|
||
|
|
||
|
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
|
||
|
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
|
||
|
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
|
||
|
+ /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
|
||
|
+ if (adev->sdma.num_instances > 1) {
|
||
|
+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
|
||
|
+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
|
||
|
+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
|
||
|
+ }
|
||
|
}
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
||
|
index 5349ca4d19e38..6d8ff3b099422 100644
|
||
|
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
||
|
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
|
||
|
@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
|
||
|
|
||
|
gfx_v9_0_tiling_mode_table_init(adev);
|
||
|
|
||
|
- gfx_v9_0_setup_rb(adev);
|
||
|
+ if (adev->gfx.num_gfx_rings)
|
||
|
+ gfx_v9_0_setup_rb(adev);
|
||
|
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
|
||
|
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
|
||
|
|
||
|
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
|
||
|
index 3f44a099c52a4..3e51e773f92be 100644
|
||
|
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
|
||
|
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
|
||
|
@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
|
||
|
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
|
||
|
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
|
||
|
|
||
|
+ tmp = mmVM_L2_CNTL3_DEFAULT;
|
||
|
if (adev->gmc.translate_further) {
|
||
|
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
|
||
|
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
|
||
|
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
||
|
index c7a592d68febf..275bfb8ca6f89 100644
|
||
|
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
||
|
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
|
||
|
@@ -3188,7 +3188,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
|
||
|
&crc_win_y_end_fops);
|
||
|
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
|
||
|
&crc_win_update_fops);
|
||
|
-
|
||
|
+ dput(dir);
|
||
|
}
|
||
|
#endif
|
||
|
/*
|
||
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
|
||
|
index 30c6f9cd717f3..27fbe906682f9 100644
|
||
|
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
|
||
|
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
|
||
|
@@ -41,6 +41,12 @@
|
||
|
#define FN(reg_name, field) \
|
||
|
FD(reg_name##__##field)
|
||
|
|
||
|
+#include "logger_types.h"
|
||
|
+#undef DC_LOGGER
|
||
|
+#define DC_LOGGER \
|
||
|
+ CTX->logger
|
||
|
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
|
||
|
+
|
||
|
#define VBIOSSMC_MSG_TestMessage 0x1
|
||
|
#define VBIOSSMC_MSG_GetSmuVersion 0x2
|
||
|
#define VBIOSSMC_MSG_PowerUpGfx 0x3
|
||
|
@@ -95,7 +101,13 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
|
||
|
uint32_t result;
|
||
|
|
||
|
result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
|
||
|
- ASSERT(result == VBIOSSMC_Result_OK);
|
||
|
+
|
||
|
+ if (result != VBIOSSMC_Result_OK)
|
||
|
+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
|
||
|
+
|
||
|
+ if (result == VBIOSSMC_Status_BUSY) {
|
||
|
+ return -1;
|
||
|
+ }
|
||
|
|
||
|
/* First clear response register */
|
||
|
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
|
||
|
@@ -176,6 +188,10 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
|
||
|
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
|
||
|
khz_to_mhz_ceil(requested_dcfclk_khz));
|
||
|
|
||
|
+#ifdef DBG
|
||
|
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
|
||
|
+#endif
|
||
|
+
|
||
|
return actual_dcfclk_set_mhz * 1000;
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
|
||
|
index 1cae01a91a69d..e4f96b6fd79d0 100644
|
||
|
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
|
||
|
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
|
||
|
@@ -41,6 +41,12 @@
|
||
|
#define FN(reg_name, field) \
|
||
|
FD(reg_name##__##field)
|
||
|
|
||
|
+#include "logger_types.h"
|
||
|
+#undef DC_LOGGER
|
||
|
+#define DC_LOGGER \
|
||
|
+ CTX->logger
|
||
|
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
|
||
|
+
|
||
|
#define VBIOSSMC_MSG_GetSmuVersion 0x2
|
||
|
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
|
||
|
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5
|
||
|
@@ -96,6 +102,13 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
|
||
|
|
||
|
result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
|
||
|
|
||
|
+ if (result != VBIOSSMC_Result_OK)
|
||
|
+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
|
||
|
+
|
||
|
+ if (result == VBIOSSMC_Status_BUSY) {
|
||
|
+ return -1;
|
||
|
+ }
|
||
|
+
|
||
|
/* First clear response register */
|
||
|
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
|
||
|
|
||
|
@@ -167,6 +180,10 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
|
||
|
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
|
||
|
khz_to_mhz_ceil(requested_dcfclk_khz));
|
||
|
|
||
|
+#ifdef DBG
|
||
|
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
|
||
|
+#endif
|
||
|
+
|
||
|
return actual_dcfclk_set_mhz * 1000;
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
|
||
|
index c5d7d075026f3..090b2c02aee17 100644
|
||
|
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
|
||
|
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
|
||
|
@@ -40,6 +40,12 @@
|
||
|
#define FN(reg_name, field) \
|
||
|
FD(reg_name##__##field)
|
||
|
|
||
|
+#include "logger_types.h"
|
||
|
+#undef DC_LOGGER
|
||
|
+#define DC_LOGGER \
|
||
|
+ CTX->logger
|
||
|
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
|
||
|
+
|
||
|
#define VBIOSSMC_MSG_TestMessage 0x1
|
||
|
#define VBIOSSMC_MSG_GetSmuVersion 0x2
|
||
|
#define VBIOSSMC_MSG_PowerUpGfx 0x3
|
||
|
@@ -102,7 +108,9 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
|
||
|
uint32_t result;
|
||
|
|
||
|
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
|
||
|
- ASSERT(result == VBIOSSMC_Result_OK);
|
||
|
+
|
||
|
+ if (result != VBIOSSMC_Result_OK)
|
||
|
+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
|
||
|
|
||
|
if (result == VBIOSSMC_Status_BUSY) {
|
||
|
return -1;
|
||
|
@@ -194,6 +202,10 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
|
||
|
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
|
||
|
khz_to_mhz_ceil(requested_dcfclk_khz));
|
||
|
|
||
|
+#ifdef DBG
|
||
|
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
|
||
|
+#endif
|
||
|
+
|
||
|
return actual_dcfclk_set_mhz * 1000;
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
|
||
|
index 2600313fea579..925d6e13620ec 100644
|
||
|
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
|
||
|
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
|
||
|
@@ -70,6 +70,12 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
|
||
|
#define REG_NBIO(reg_name) \
|
||
|
(NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name)
|
||
|
|
||
|
+#include "logger_types.h"
|
||
|
+#undef DC_LOGGER
|
||
|
+#define DC_LOGGER \
|
||
|
+ CTX->logger
|
||
|
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
|
||
|
+
|
||
|
#define mmMP1_C2PMSG_3 0x3B1050C
|
||
|
|
||
|
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
|
||
|
@@ -130,7 +136,9 @@ static int dcn315_smu_send_msg_with_param(
|
||
|
uint32_t result;
|
||
|
|
||
|
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
|
||
|
- ASSERT(result == VBIOSSMC_Result_OK);
|
||
|
+
|
||
|
+ if (result != VBIOSSMC_Result_OK)
|
||
|
+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);
|
||
|
|
||
|
if (result == VBIOSSMC_Status_BUSY) {
|
||
|
return -1;
|
||
|
@@ -197,6 +205,10 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
|
||
|
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
|
||
|
khz_to_mhz_ceil(requested_dcfclk_khz));
|
||
|
|
||
|
+#ifdef DBG
|
||
|
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
|
||
|
+#endif
|
||
|
+
|
||
|
return actual_dcfclk_set_mhz * 1000;
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
index dceec4b960527..457a9254ae1c8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
@@ -58,6 +58,12 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000
#define FN(reg_name, field) \
FD(reg_name##__##field)

+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ CTX->logger
+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+
#define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
#define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
#define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0
@@ -118,7 +124,9 @@ static int dcn316_smu_send_msg_with_param(
uint32_t result;

result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000);
- ASSERT(result == VBIOSSMC_Result_OK);
+
+ if (result != VBIOSSMC_Result_OK)
+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result);

if (result == VBIOSSMC_Status_BUSY) {
return -1;
@@ -183,6 +191,10 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));

+#ifdef DBG
+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
+#endif
+
return actual_dcfclk_set_mhz * 1000;
}

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 86d670c712867..ad068865ba206 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_private_object_init);

-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);

- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);

drm_gem_object_handle_put_unlocked(obj);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac09..7bb98e6a446d0 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);

/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index e3f09f18110c7..bd5366b16381b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}

-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;

- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;

- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 0c5638f5b72bc..91caf4523b34d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -478,6 +478,13 @@ init_bdb_block(struct drm_i915_private *i915,

block_size = get_blocksize(block);

+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequenece block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
if (!entry) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9feaf1a589f38..d213d8ad1ea53 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -671,6 +671,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);

+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on varius TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 40e2e28ee6c75..bf01780e7ea56 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -12,6 +12,7 @@
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_rps.h"

struct ia_constants {
unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;

- if (rps->max_freq <= rps->min_freq)
- return false;
-
consts->max_ia_freq = cpu_max_MHz();

consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);

- consts->min_gpu_freq = rps->min_freq;
- consts->max_gpu_freq = rps->max_freq;
- if (GRAPHICS_VER(i915) >= 9) {
- /* Convert GT frequency to 50 HZ units */
- consts->min_gpu_freq /= GEN9_FREQ_SCALER;
- consts->max_gpu_freq /= GEN9_FREQ_SCALER;
- }
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);

return true;
}
@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
if (!get_ia_constants(llc, &consts))
return;

+ /*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't get accidentally into infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 3476a11f294ce..7c068cc64c2fa 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -2123,6 +2123,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}

+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in a raw format. In newer platforms raw is in
+ * units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2211,6 +2236,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}

+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in a raw format. In newer platforms raw is in
+ * units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 1e8d564913083..4509dfdc52e09 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 429644d5ddc69..9fba16cb3f1e7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1604,6 +1604,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (r) {
/* delay GPU reset to resume */
radeon_fence_driver_force_completion(rdev, i);
+ } else {
+ /* finish executing delayed work */
+ flush_delayed_work(&rdev->fence_drv[i].lockup_work);
}
}

diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 3633ab691662b..81e688975c6a7 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -54,6 +54,10 @@ static char *mutex_path_override;
/* ACPI mutex for locking access to the EC for the firmware */
#define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX"

+#define ASUS_HW_ACCESS_MUTEX_RMTW_ASMX "\\RMTW.ASMX"
+
+#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0 "\\_SB_.PCI0.SBRG.SIO1.MUT0"
+
#define MAX_IDENTICAL_BOARD_VARIATIONS 3

/* Moniker for the ACPI global lock (':' is not allowed in ASL identifiers) */
@@ -119,6 +123,18 @@ enum ec_sensors {
|
||
|
ec_sensor_temp_water_in,
|
||
|
/* "Water_Out" temperature sensor reading [℃] */
|
||
|
ec_sensor_temp_water_out,
|
||
|
+ /* "Water_Block_In" temperature sensor reading [℃] */
|
||
|
+ ec_sensor_temp_water_block_in,
|
||
|
+ /* "Water_Block_Out" temperature sensor reading [℃] */
|
||
|
+ ec_sensor_temp_water_block_out,
|
||
|
+ /* "T_sensor_2" temperature sensor reading [℃] */
|
||
|
+ ec_sensor_temp_t_sensor_2,
|
||
|
+ /* "Extra_1" temperature sensor reading [℃] */
|
||
|
+ ec_sensor_temp_sensor_extra_1,
|
||
|
+ /* "Extra_2" temperature sensor reading [℃] */
|
||
|
+ ec_sensor_temp_sensor_extra_2,
|
||
|
+ /* "Extra_3" temperature sensor reading [℃] */
|
||
|
+ ec_sensor_temp_sensor_extra_3,
|
||
|
};
|
||
|
|
||
|
#define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset)
|
||
|
@@ -134,11 +150,19 @@ enum ec_sensors {
|
||
|
#define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu)
|
||
|
#define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in)
|
||
|
#define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out)
|
||
|
+#define SENSOR_TEMP_WATER_BLOCK_IN BIT(ec_sensor_temp_water_block_in)
|
||
|
+#define SENSOR_TEMP_WATER_BLOCK_OUT BIT(ec_sensor_temp_water_block_out)
|
||
|
+#define SENSOR_TEMP_T_SENSOR_2 BIT(ec_sensor_temp_t_sensor_2)
|
||
|
+#define SENSOR_TEMP_SENSOR_EXTRA_1 BIT(ec_sensor_temp_sensor_extra_1)
|
||
|
+#define SENSOR_TEMP_SENSOR_EXTRA_2 BIT(ec_sensor_temp_sensor_extra_2)
|
||
|
+#define SENSOR_TEMP_SENSOR_EXTRA_3 BIT(ec_sensor_temp_sensor_extra_3)
|
||
|
|
||
|
enum board_family {
|
||
|
family_unknown,
|
||
|
family_amd_400_series,
|
||
|
family_amd_500_series,
|
||
|
+ family_intel_300_series,
|
||
|
+ family_intel_600_series
|
||
|
};
|
||
|
|
||
|
/* All the known sensors for ASUS EC controllers */
|
||
|
@@ -195,15 +219,54 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
|
||
|
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
|
||
|
[ec_sensor_temp_water_out] =
|
||
|
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
|
||
|
+ [ec_sensor_temp_water_block_in] =
|
||
|
+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
|
||
|
+ [ec_sensor_temp_water_block_out] =
|
||
|
+ EC_SENSOR("Water_Block_Out", hwmon_temp, 1, 0x01, 0x03),
|
||
|
+ [ec_sensor_temp_sensor_extra_1] =
|
||
|
+ EC_SENSOR("Extra_1", hwmon_temp, 1, 0x01, 0x09),
|
||
|
+ [ec_sensor_temp_t_sensor_2] =
|
||
|
+ EC_SENSOR("T_sensor_2", hwmon_temp, 1, 0x01, 0x0a),
|
||
|
+ [ec_sensor_temp_sensor_extra_2] =
|
||
|
+ EC_SENSOR("Extra_2", hwmon_temp, 1, 0x01, 0x0b),
|
||
|
+ [ec_sensor_temp_sensor_extra_3] =
|
||
|
+ EC_SENSOR("Extra_3", hwmon_temp, 1, 0x01, 0x0c),
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_sensor_info sensors_family_intel_300[] = {
|
||
|
+ [ec_sensor_temp_chipset] =
|
||
|
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
|
||
|
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
|
||
|
+ [ec_sensor_temp_mb] =
|
||
|
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
|
||
|
+ [ec_sensor_temp_t_sensor] =
|
||
|
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
|
||
|
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
|
||
|
+ [ec_sensor_fan_cpu_opt] =
|
||
|
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
|
||
|
+ [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
|
||
|
+ [ec_sensor_fan_water_flow] =
|
||
|
+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
|
||
|
+ [ec_sensor_temp_water_in] =
|
||
|
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
|
||
|
+ [ec_sensor_temp_water_out] =
|
||
|
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_sensor_info sensors_family_intel_600[] = {
|
||
|
+ [ec_sensor_temp_t_sensor] =
|
||
|
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
|
||
|
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
|
||
|
};
|
||
|
|
||
|
/* Shortcuts for common combinations */
|
||
|
#define SENSOR_SET_TEMP_CHIPSET_CPU_MB \
|
||
|
(SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB)
|
||
|
#define SENSOR_SET_TEMP_WATER (SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT)
|
||
|
+#define SENSOR_SET_WATER_BLOCK \
|
||
|
+ (SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
|
||
|
|
||
|
struct ec_board_info {
|
||
|
- const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
|
||
|
unsigned long sensors;
|
||
|
/*
|
||
|
* Defines which mutex to use for guarding access to the state and the
|
||
|
@@ -216,121 +279,194 @@ struct ec_board_info {
|
||
|
enum board_family family;
|
||
|
};
|
||
|
|
||
|
-static const struct ec_board_info board_info[] = {
|
||
|
- {
|
||
|
- .board_names = {"PRIME X470-PRO"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_FAN_CPU_OPT |
|
||
|
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
|
||
|
- .family = family_amd_400_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"PRIME X570-PRO"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ProArt X570-CREATOR WIFI"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
|
||
|
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"Pro WS X570-ACE"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
|
||
|
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG CROSSHAIR VIII DARK HERO"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR |
|
||
|
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
|
||
|
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
|
||
|
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {
|
||
|
- "ROG CROSSHAIR VIII FORMULA",
|
||
|
- "ROG CROSSHAIR VIII HERO",
|
||
|
- "ROG CROSSHAIR VIII HERO (WI-FI)",
|
||
|
- },
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR |
|
||
|
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
|
||
|
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
|
||
|
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
|
||
|
- SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG CROSSHAIR VIII IMPACT"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
|
||
|
- SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG STRIX B550-E GAMING"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_FAN_CPU_OPT,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG STRIX B550-I GAMING"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
|
||
|
- SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG STRIX X570-E GAMING"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
|
||
|
- SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG STRIX X570-E GAMING WIFI II"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
|
||
|
- SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG STRIX X570-F GAMING"},
|
||
|
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {
|
||
|
- .board_names = {"ROG STRIX X570-I GAMING"},
|
||
|
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_FAN_VRM_HS |
|
||
|
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
|
||
|
- SENSOR_IN_CPU_CORE,
|
||
|
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
- .family = family_amd_500_series,
|
||
|
- },
|
||
|
- {}
|
||
|
+static const struct ec_board_info board_info_prime_x470_pro = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_FAN_CPU_OPT |
|
||
|
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
|
||
|
+ .family = family_amd_400_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_prime_x570_pro = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
|
||
|
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
|
||
|
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR |
|
||
|
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
|
||
|
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
|
||
|
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_crosshair_viii_hero = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR |
|
||
|
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
|
||
|
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
|
||
|
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
|
||
|
+ SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_maximus_xi_hero = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR |
|
||
|
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
|
||
|
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_intel_300_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_crosshair_viii_impact = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
|
||
|
+ SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_FAN_CPU_OPT,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
|
||
|
+ SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_x570_e_gaming = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
|
||
|
+ SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
|
||
|
+ SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_x570_f_gaming = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
|
||
|
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_x570_i_gaming = {
|
||
|
+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
|
||
|
+ SENSOR_TEMP_T_SENSOR |
|
||
|
+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
|
||
|
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
|
||
|
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
|
||
|
+ .family = family_intel_600_series,
|
||
|
+};
|
||
|
+
|
||
|
+static const struct ec_board_info board_info_zenith_ii_extreme = {
|
||
|
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
|
||
|
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
|
||
|
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
|
||
|
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
|
||
|
+ SENSOR_SET_WATER_BLOCK |
|
||
|
+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
|
||
|
+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
|
||
|
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
|
||
|
+ .family = family_amd_500_series,
|
||
|
+};
|
||
|
+
|
||
|
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
|
||
|
+ { \
|
||
|
+ .matches = { \
|
||
|
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, \
|
||
|
+ "ASUSTeK COMPUTER INC."), \
|
||
|
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
|
||
|
+ }, \
|
||
|
+ .driver_data = (void *)board_info, \
|
||
|
+ }
|
||
|
+
|
||
|
+static const struct dmi_system_id dmi_table[] = {
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
|
||
|
+ &board_info_prime_x470_pro),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
|
||
|
+ &board_info_prime_x570_pro),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
|
||
|
+ &board_info_pro_art_x570_creator_wifi),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
|
||
|
+ &board_info_pro_ws_x570_ace),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
|
||
|
+ &board_info_crosshair_viii_dark_hero),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA",
|
||
|
+ &board_info_crosshair_viii_hero),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO",
|
||
|
+ &board_info_crosshair_viii_hero),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
|
||
|
+ &board_info_crosshair_viii_hero),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
|
||
|
+ &board_info_maximus_xi_hero),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
|
||
|
+ &board_info_maximus_xi_hero),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
|
||
|
+ &board_info_crosshair_viii_impact),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
|
||
|
+ &board_info_strix_b550_e_gaming),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
|
||
|
+ &board_info_strix_b550_i_gaming),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
|
||
|
+ &board_info_strix_x570_e_gaming),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
|
||
|
+ &board_info_strix_x570_e_gaming_wifi_ii),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING",
|
||
|
+ &board_info_strix_x570_f_gaming),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
|
||
|
+ &board_info_strix_x570_i_gaming),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
|
||
|
+ &board_info_strix_z690_a_gaming_wifi_d4),
|
||
|
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
|
||
|
+ &board_info_zenith_ii_extreme),
|
||
|
+ {},
|
||
|
};
|
||
|
|
||
|
struct ec_sensor {
|
||
|
@@ -441,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec,
|
||
|
return -ENOENT;
|
||
|
}
|
||
|
|
||
|
-static int __init bank_compare(const void *a, const void *b)
|
||
|
+static int bank_compare(const void *a, const void *b)
|
||
|
{
|
||
|
return *((const s8 *)a) - *((const s8 *)b);
|
||
|
}
|
||
|
|
||
|
-static void __init setup_sensor_data(struct ec_sensors_data *ec)
|
||
|
+static void setup_sensor_data(struct ec_sensors_data *ec)
|
||
|
{
|
||
|
struct ec_sensor *s = ec->sensors;
|
||
|
bool bank_found;
|
||
|
@@ -478,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec)
|
||
|
sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
|
||
|
}
|
||
|
|
||
|
-static void __init fill_ec_registers(struct ec_sensors_data *ec)
|
||
|
+static void fill_ec_registers(struct ec_sensors_data *ec)
|
||
|
{
|
||
|
const struct ec_sensor_info *si;
|
||
|
unsigned int i, j, register_idx = 0;
|
||
|
@@ -493,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec)
|
||
|
}
|
||
|
}
|
||
|
|
||
|
-static int __init setup_lock_data(struct device *dev)
|
||
|
+static int setup_lock_data(struct device *dev)
|
||
|
{
|
||
|
const char *mutex_path;
|
||
|
int status;
|
||
|
@@ -716,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
|
||
|
return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
|
||
|
}
|
||
|
|
||
|
-static int __init
|
||
|
+static int
|
||
|
asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
|
||
|
struct device *dev, int num,
|
||
|
enum hwmon_sensor_types type, u32 config)
|
||
|
@@ -745,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = {
|
||
|
.ops = &asus_ec_hwmon_ops,
|
||
|
};
|
||
|
|
||
|
-static const struct ec_board_info * __init get_board_info(void)
|
||
|
+static const struct ec_board_info *get_board_info(void)
|
||
|
{
|
||
|
- const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
|
||
|
- const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME);
|
||
|
- const struct ec_board_info *board;
|
||
|
-
|
||
|
- if (!dmi_board_vendor || !dmi_board_name ||
|
||
|
- strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC."))
|
||
|
- return NULL;
|
||
|
-
|
||
|
- for (board = board_info; board->sensors; board++) {
|
||
|
- if (match_string(board->board_names,
|
||
|
- MAX_IDENTICAL_BOARD_VARIATIONS,
|
||
|
- dmi_board_name) >= 0)
|
||
|
- return board;
|
||
|
- }
|
||
|
+ const struct dmi_system_id *dmi_entry;
|
||
|
|
||
|
- return NULL;
|
||
|
+ dmi_entry = dmi_first_match(dmi_table);
|
||
|
+ return dmi_entry ? dmi_entry->driver_data : NULL;
|
||
|
}
|
||
|
|
||
|
-static int __init asus_ec_probe(struct platform_device *pdev)
|
||
|
+static int asus_ec_probe(struct platform_device *pdev)
|
||
|
{
|
||
|
const struct hwmon_channel_info **ptr_asus_ec_ci;
|
||
|
int nr_count[hwmon_max] = { 0 }, nr_types = 0;
|
||
|
@@ -799,6 +923,12 @@ static int __init asus_ec_probe(struct platform_device *pdev)
|
||
|
case family_amd_500_series:
|
||
|
ec_data->sensors_info = sensors_family_amd_500;
|
||
|
break;
|
||
|
+ case family_intel_300_series:
|
||
|
+ ec_data->sensors_info = sensors_family_intel_300;
|
||
|
+ break;
|
||
|
+ case family_intel_600_series:
|
||
|
+ ec_data->sensors_info = sensors_family_intel_600;
|
||
|
+ break;
|
||
|
default:
|
||
|
dev_err(dev, "Unknown board family: %d",
|
||
|
ec_data->board_info->family);
|
||
|
@@ -868,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev)
|
||
|
return PTR_ERR_OR_ZERO(hwdev);
|
||
|
}
|
||
|
|
||
|
-
|
||
|
-static const struct acpi_device_id acpi_ec_ids[] = {
|
||
|
- /* Embedded Controller Device */
|
||
|
- { "PNP0C09", 0 },
|
||
|
- {}
|
||
|
-};
|
||
|
+MODULE_DEVICE_TABLE(dmi, dmi_table);
|
||
|
|
||
|
static struct platform_driver asus_ec_sensors_platform_driver = {
|
||
|
.driver = {
|
||
|
.name = "asus-ec-sensors",
|
||
|
- .acpi_match_table = acpi_ec_ids,
|
||
|
},
|
||
|
+ .probe = asus_ec_probe,
|
||
|
};
|
||
|
|
||
|
-MODULE_DEVICE_TABLE(acpi, acpi_ec_ids);
|
||
|
-/*
|
||
|
- * we use module_platform_driver_probe() rather than module_platform_driver()
|
||
|
- * because the probe function (and its dependants) are marked with __init, which
|
||
|
- * means we can't put it into the .probe member of the platform_driver struct
|
||
|
- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init
|
||
|
- * because the object is referenced from the module exit code.
|
||
|
- */
|
||
|
-module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
|
||
|
+static struct platform_device *asus_ec_sensors_platform_device;
|
||
|
+
|
||
|
+static int __init asus_ec_init(void)
|
||
|
+{
|
||
|
+ asus_ec_sensors_platform_device =
|
||
|
+ platform_create_bundle(&asus_ec_sensors_platform_driver,
|
||
|
+ asus_ec_probe, NULL, 0, NULL, 0);
|
||
|
+
|
||
|
+ if (IS_ERR(asus_ec_sensors_platform_device))
|
||
|
+ return PTR_ERR(asus_ec_sensors_platform_device);
|
||
|
+
|
||
|
+ return 0;
|
||
|
+}
|
||
|
+
|
||
|
+static void __exit asus_ec_exit(void)
|
||
|
+{
|
||
|
+ platform_device_unregister(asus_ec_sensors_platform_device);
|
||
|
+ platform_driver_unregister(&asus_ec_sensors_platform_driver);
|
||
|
+}
|
||
|
+
|
||
|
+module_init(asus_ec_init);
|
||
|
+module_exit(asus_ec_exit);
|
||
|
|
||
|
module_param_named(mutex_path, mutex_path_override, charp, 0);
|
||
|
MODULE_PARM_DESC(mutex_path,
|
||
|
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
|
||
|
index 26278b0f17a98..9259779cc2dff 100644
|
||
|
--- a/drivers/hwmon/mr75203.c
|
||
|
+++ b/drivers/hwmon/mr75203.c
|
||
|
@@ -68,8 +68,9 @@
|
||
|
|
||
|
/* VM Individual Macro Register */
|
||
|
#define VM_COM_REG_SIZE 0x200
|
||
|
-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
|
||
|
-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
|
||
|
+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
|
||
|
+#define VM_SDIF_DATA(vm, ch) \
|
||
|
+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
|
||
|
|
||
|
/* SDA Slave Register */
|
||
|
#define IP_CTRL 0x00
|
||
|
@@ -115,6 +116,7 @@ struct pvt_device {
|
||
|
u32 t_num;
|
||
|
u32 p_num;
|
||
|
u32 v_num;
|
||
|
+ u32 c_num;
|
||
|
u32 ip_freq;
|
||
|
u8 *vm_idx;
|
||
|
};
|
||
|
@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
|
||
|
{
|
||
|
struct pvt_device *pvt = dev_get_drvdata(dev);
|
||
|
struct regmap *v_map = pvt->v_map;
|
||
|
+ u8 vm_idx, ch_idx;
|
||
|
u32 n, stat;
|
||
|
- u8 vm_idx;
|
||
|
int ret;
|
||
|
|
||
|
- if (channel >= pvt->v_num)
|
||
|
+ if (channel >= pvt->v_num * pvt->c_num)
|
||
|
return -EINVAL;
|
||
|
|
||
|
- vm_idx = pvt->vm_idx[channel];
|
||
|
+ vm_idx = pvt->vm_idx[channel / pvt->c_num];
|
||
|
+ ch_idx = channel % pvt->c_num;
|
||
|
|
||
|
switch (attr) {
|
||
|
case hwmon_in_input:
|
||
|
@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
|
||
|
if (ret)
|
||
|
return ret;
|
||
|
|
||
|
- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
|
||
|
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
|
||
|
if(ret < 0)
|
||
|
return ret;
|
||
|
|
||
|
n &= SAMPLE_DATA_MSK;
|
||
|
- /* Convert the N bitstream count into voltage */
|
||
|
- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
|
||
|
+ /*
|
||
|
+ * Convert the N bitstream count into voltage.
|
||
|
+ * To support negative voltage calculation for 64bit machines
|
||
|
+ * n must be cast to long, since n and *val differ both in
|
||
|
+ * signedness and in size.
|
||
|
+ * Division is used instead of right shift, because for signed
|
||
|
+ * numbers, the sign bit is used to fill the vacated bit
|
||
|
+ * positions, and if the number is negative, 1 is used.
|
||
|
+ * BIT(x) may not be used instead of (1 << x) because it's
|
||
|
+ * unsigned.
|
||
|
+ */
|
||
|
+ *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
|
||
|
|
||
|
return 0;
|
||
|
default:
|
||
|
@@ -375,6 +388,19 @@ static int pvt_init(struct pvt_device *pvt)
|
||
|
if (ret)
|
||
|
return ret;
|
||
|
|
||
|
+ val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
|
||
|
+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
|
||
|
+ ret = regmap_write(v_map, SDIF_W, val);
|
||
|
+ if (ret < 0)
|
||
|
+ return ret;
|
||
|
+
|
||
|
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
|
||
|
+ val, !(val & SDIF_BUSY),
|
||
|
+ PVT_POLL_DELAY_US,
|
||
|
+ PVT_POLL_TIMEOUT_US);
|
||
|
+ if (ret)
|
||
|
+ return ret;
|
||
|
+
|
||
|
val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
|
||
|
CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
|
||
|
SDIF_WRN_W | SDIF_PROG;
|
||
|
@@ -489,8 +515,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
|
||
|
|
||
|
static int mr75203_probe(struct platform_device *pdev)
|
||
|
{
|
||
|
+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
|
||
|
const struct hwmon_channel_info **pvt_info;
|
||
|
- u32 ts_num, vm_num, pd_num, val, index, i;
|
||
|
struct device *dev = &pdev->dev;
|
||
|
u32 *temp_config, *in_config;
|
||
|
struct device *hwmon_dev;
|
||
|
@@ -531,9 +557,11 @@ static int mr75203_probe(struct platform_device *pdev)
|
||
|
ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
|
||
|
pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
|
||
|
vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
|
||
|
+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
|
||
|
pvt->t_num = ts_num;
|
||
|
pvt->p_num = pd_num;
|
||
|
pvt->v_num = vm_num;
|
||
|
+ pvt->c_num = ch_num;
|
||
|
val = 0;
|
||
|
if (ts_num)
|
||
|
val++;
|
||
|
@@ -570,7 +598,7 @@ static int mr75203_probe(struct platform_device *pdev)
|
||
|
}
|
||
|
|
||
|
if (vm_num) {
|
||
|
- u32 num = vm_num;
|
||
|
+ u32 total_ch;
|
||
|
|
||
|
ret = pvt_get_regmap(pdev, "vm", pvt);
|
||
|
if (ret)
|
||
|
@@ -584,30 +612,30 @@ static int mr75203_probe(struct platform_device *pdev)
|
||
|
ret = device_property_read_u8_array(dev, "intel,vm-map",
|
||
|
pvt->vm_idx, vm_num);
|
||
|
if (ret) {
|
||
|
- num = 0;
|
||
|
+ /*
|
||
|
+ * Incase intel,vm-map property is not defined, we
|
||
|
+ * assume incremental channel numbers.
|
||
|
+ */
|
||
|
+ for (i = 0; i < vm_num; i++)
|
||
|
+ pvt->vm_idx[i] = i;
|
||
|
} else {
|
||
|
for (i = 0; i < vm_num; i++)
|
||
|
if (pvt->vm_idx[i] >= vm_num ||
|
||
|
pvt->vm_idx[i] == 0xff) {
|
||
|
- num = i;
|
||
|
+ pvt->v_num = i;
|
||
|
+ vm_num = i;
|
||
|
break;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
- /*
|
||
|
- * Incase intel,vm-map property is not defined, we assume
|
||
|
- * incremental channel numbers.
|
||
|
- */
|
||
|
- for (i = num; i < vm_num; i++)
|
||
|
- pvt->vm_idx[i] = i;
|
||
|
-
|
||
|
- in_config = devm_kcalloc(dev, num + 1,
|
||
|
+ total_ch = ch_num * vm_num;
|
||
|
+ in_config = devm_kcalloc(dev, total_ch + 1,
|
||
|
sizeof(*in_config), GFP_KERNEL);
|
||
|
if (!in_config)
|
||
|
return -ENOMEM;
|
||
|
|
||
|
- memset32(in_config, HWMON_I_INPUT, num);
|
||
|
- in_config[num] = 0;
|
||
|
+ memset32(in_config, HWMON_I_INPUT, total_ch);
|
||
|
+ in_config[total_ch] = 0;
|
||
|
pvt_in.config = in_config;
|
||
|
|
||
|
pvt_info[index++] = &pvt_in;
|
||
|
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 8bd6435c13e82..2148fd543bb4b 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -489,18 +489,20 @@ static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)

static int tps23861_port_resistance(struct tps23861_data *data, int port)
{
- u16 regval;
+ unsigned int raw_val;
+ __le16 regval;

regmap_bulk_read(data->regmap,
PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
&regval,
2);

- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ raw_val = le16_to_cpu(regval);
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
case PORT_RESISTANCE_RSN_OTHER:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
case PORT_RESISTANCE_RSN_LOW:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
case PORT_RESISTANCE_RSN_SHORT:
case PORT_RESISTANCE_RSN_OPEN:
default:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fabca5e51e3d4..4dd133eccfdfb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1719,8 +1719,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}

if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 186ed8859920c..d39e16c211e8a 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -462,7 +462,7 @@ retry:
mutex_unlock(&umem_odp->umem_mutex);

out_put_mm:
- mmput(owning_mm);
+ mmput_async(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2855e9ad4b328..1df076e70e293 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -730,7 +730,6 @@ struct hns_roce_caps {
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
- int num_qpc_timer;
u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b354caeaa9b29..49edff989f1f1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1941,7 +1941,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)

caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;

caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
@@ -2237,7 +2237,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
- caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 7ffb7824d2689..e4b640caee1b7 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -36,11 +36,11 @@
#include <linux/bitops.h>

#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
@@ -83,7 +83,7 @@

#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c8af4ebd7cbd3..4ccb217b2841d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -725,7 +725,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.num_qpc_timer, 1);
+ hr_dev->caps.qpc_timer_bt_num, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 48d3616a6d71d..7bee7f6c5e702 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -462,11 +462,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
hr_qp->rq.rsv_sge);

- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
- hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);

hr_qp->rq.wqe_cnt = cnt;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
|
||
|
index daeab5daed5bc..d003ad864ee44 100644
|
||
|
--- a/drivers/infiniband/hw/irdma/uk.c
|
||
|
+++ b/drivers/infiniband/hw/irdma/uk.c
|
||
|
@@ -1005,6 +1005,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
|
||
|
int ret_code;
|
||
|
bool move_cq_head = true;
|
||
|
u8 polarity;
|
||
|
+ u8 op_type;
|
||
|
bool ext_valid;
|
||
|
__le64 *ext_cqe;
|
||
|
|
||
|
@@ -1187,7 +1188,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
|
||
|
do {
|
||
|
__le64 *sw_wqe;
|
||
|
u64 wqe_qword;
|
||
|
- u8 op_type;
|
||
|
u32 tail;
|
||
|
|
||
|
tail = qp->sq_ring.tail;
|
||
|
@@ -1204,6 +1204,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
|
||
|
break;
|
||
|
}
|
||
|
} while (1);
|
||
|
+ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
|
||
|
+ info->minor_err = FLUSH_MW_BIND_ERR;
|
||
|
qp->sq_flush_seen = true;
|
||
|
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
|
||
|
qp->sq_flush_complete = true;
|
||
|
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
|
||
|
index ab3c5208a1231..f4d774451160d 100644
|
||
|
--- a/drivers/infiniband/hw/irdma/utils.c
|
||
|
+++ b/drivers/infiniband/hw/irdma/utils.c
|
||
|
@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
|
||
|
cqp_error = cqp_request->compl_info.error;
|
||
|
if (cqp_error) {
|
||
|
err_code = -EIO;
|
||
|
- if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
|
||
|
- cqp_request->compl_info.min_err_code == 0x8029) {
|
||
|
- if (!rf->reset) {
|
||
|
- rf->reset = true;
|
||
|
- rf->gen_ops.request_reset(rf);
|
||
|
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
|
||
|
+ if (cqp_request->compl_info.min_err_code == 0x8002)
|
||
|
+ err_code = -EBUSY;
|
||
|
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
|
||
|
+ if (!rf->reset) {
|
||
|
+ rf->reset = true;
|
||
|
+ rf->gen_ops.request_reset(rf);
|
||
|
+ }
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
@@ -2597,7 +2600,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
|
||
|
spin_unlock_irqrestore(&iwqp->lock, flags2);
|
||
|
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
|
||
|
if (compl_generated)
|
||
|
- irdma_comp_handler(iwqp->iwrcq);
|
||
|
+ irdma_comp_handler(iwqp->iwscq);
|
||
|
} else {
|
||
|
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
|
||
|
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
|
||
|
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
|
||
|
index 227a799385d1d..ab73d1715f991 100644
|
||
|
--- a/drivers/infiniband/hw/irdma/verbs.c
|
||
|
+++ b/drivers/infiniband/hw/irdma/verbs.c
|
||
|
@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev,
|
||
|
props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
|
||
|
props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
|
||
|
props->max_cq = rf->max_cq - rf->used_cqs;
|
||
|
- props->max_cqe = rf->max_cqe;
|
||
|
+ props->max_cqe = rf->max_cqe - 1;
|
||
|
props->max_mr = rf->max_mr - rf->used_mrs;
|
||
|
props->max_mw = props->max_mr;
|
||
|
props->max_pd = rf->max_pd - rf->used_pds;
|
||
|
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
|
||
|
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
|
||
|
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
|
||
|
- if (rdma_protocol_roce(ibdev, 1))
|
||
|
+ if (rdma_protocol_roce(ibdev, 1)) {
|
||
|
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
|
||
|
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
|
||
|
+ }
|
||
|
+
|
||
|
props->max_ah = rf->max_ah;
|
||
|
props->max_mcast_grp = rf->max_mcg;
|
||
|
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
|
||
|
@@ -3001,6 +3004,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
|
||
|
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
|
||
|
struct irdma_cqp_request *cqp_request;
|
||
|
struct cqp_cmds_info *cqp_info;
|
||
|
+ int status;
|
||
|
|
||
|
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
|
||
|
if (iwmr->region) {
|
||
|
@@ -3031,8 +3035,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
|
||
|
cqp_info->post_sq = 1;
|
||
|
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
|
||
|
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
|
||
|
- irdma_handle_cqp_op(iwdev->rf, cqp_request);
|
||
|
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
|
||
|
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
|
||
|
+ if (status)
|
||
|
+ return status;
|
||
|
+
|
||
|
irdma_free_stag(iwdev, iwmr->stag);
|
||
|
done:
|
||
|
if (iwpbl->pbl_allocated)
|
||
|
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 293ed709e5ed5..b4dc52392275b 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
|
||
|
index 1f4e60257700e..7d47b521070b1 100644
|
||
|
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
|
||
|
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
|
||
|
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
|
||
|
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
|
||
|
|
||
|
if (paddr)
|
||
|
- return virt_to_page(paddr);
|
||
|
+ return virt_to_page((void *)paddr);
|
||
|
|
||
|
return NULL;
|
||
|
}
|
||
|
@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
|
||
|
kunmap_local(kaddr);
|
||
|
}
|
||
|
} else {
|
||
|
- u64 va = sge->laddr + sge_off;
|
||
|
+ /*
|
||
|
+ * Cast to an uintptr_t to preserve all 64 bits
|
||
|
+ * in sge->laddr.
|
||
|
+ */
|
||
|
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
|
||
|
|
||
|
- page_array[seg] = virt_to_page(va & PAGE_MASK);
|
||
|
+ /*
|
||
|
+ * virt_to_page() takes a (void *) pointer
|
||
|
+ * so cast to a (void *) meaning it will be 64
|
||
|
+ * bits on a 64 bit platform and 32 bits on a
|
||
|
+ * 32 bit platform.
|
||
|
+ */
|
||
|
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
|
||
|
if (do_crc)
|
||
|
crypto_shash_update(
|
||
|
c_tx->mpa_crc_hd,
|
||
|
- (void *)(uintptr_t)va,
|
||
|
+ (void *)va,
|
||
|
plen);
|
||
|
}
|
||
|
|
||
|
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 525f083fcaeb4..bf464400a4409 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1004,7 +1004,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
 				   struct rtrs_clt_io_req *req,
 				   struct rtrs_rbuf *rbuf, bool fr_en,
-				   u32 size, u32 imm, struct ib_send_wr *wr,
+				   u32 count, u32 size, u32 imm,
+				   struct ib_send_wr *wr,
 				   struct ib_send_wr *tail)
 {
 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -1024,12 +1025,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
 		num_sge = 2;
 		ptail = tail;
 	} else {
-		for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+		for_each_sg(req->sglist, sg, count, i) {
 			sge[i].addr = sg_dma_address(sg);
 			sge[i].length = sg_dma_len(sg);
 			sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
 		}
-		num_sge = 1 + req->sg_cnt;
+		num_sge = 1 + count;
 	}
 	sge[i].addr = req->iu->dma_addr;
 	sge[i].length = size;
@@ -1142,7 +1143,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
 	 */
 	rtrs_clt_update_all_stats(req, WRITE);
 
-	ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+	ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
 				      req->usr_len + sizeof(*msg),
 				      imm, wr, &inv_wr);
 	if (ret) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 24024bce25664..ee4876bdce4ac 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -600,7 +600,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
 		struct sg_table *sgt = &srv_mr->sgt;
 		struct scatterlist *s;
 		struct ib_mr *mr;
-		int nr, chunks;
+		int nr, nr_sgt, chunks;
 
 		chunks = chunks_per_mr * mri;
 		if (!always_invalidate)
@@ -615,19 +615,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
 			sg_set_page(s, srv->chunks[chunks + i],
 				    max_chunk_size, 0);
 
-		nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+		nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
 				   sgt->nents, DMA_BIDIRECTIONAL);
-		if (nr < sgt->nents) {
-			err = nr < 0 ? nr : -EINVAL;
+		if (!nr_sgt) {
+			err = -EINVAL;
 			goto free_sg;
 		}
 		mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
-				 sgt->nents);
+				 nr_sgt);
 		if (IS_ERR(mr)) {
 			err = PTR_ERR(mr);
 			goto unmap_sg;
 		}
-		nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+		nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
 				  NULL, max_chunk_size);
 		if (nr < 0 || nr < sgt->nents) {
 			err = nr < 0 ? nr : -EINVAL;
@@ -646,7 +646,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
 			}
 		}
 		/* Eventually dma addr for each chunk can be cached */
-		for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+		for_each_sg(sgt->sgl, s, nr_sgt, i)
 			srv_path->dma_addr[chunks + i] = sg_dma_address(s);
 
 		ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 6058abf42ba74..3d9c108d73ad8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1962,7 +1962,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
 		if (scmnd) {
 			req = scsi_cmd_priv(scmnd);
 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
-		} else {
+		}
+		if (!scmnd) {
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 840831d5d2ad9..a0924144bac80 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -874,7 +874,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
 	cmd->data[1] = upper_32_bits(paddr);
-	cmd->data[2] = data;
+	cmd->data[2] = lower_32_bits(data);
+	cmd->data[3] = upper_32_bits(data);
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
 
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index afb3efd565b78..f3e2689787ae5 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -786,6 +786,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	if (dev_state->domain == NULL)
 		goto out_free_states;
 
+	/* See iommu_is_default_domain() */
+	dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
 	amd_iommu_domain_direct_map(dev_state->domain);
 
 	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 64b14ac4c7b02..fc8c1420c0b69 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2368,6 +2368,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
 	if (!dmar_in_use())
 		return 0;
 
+	/*
+	 * It's unlikely that any I/O board is hot added before the IOMMU
+	 * subsystem is initialized.
+	 */
+	if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
+		return -EOPNOTSUPP;
+
 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
 		tmp = handle;
 	} else {
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5c0dce78586aa..40ac3a78d90ef 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -422,14 +422,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
 	return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
 }
 
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+	unsigned long fl_sagaw, sl_sagaw;
+
+	fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
+	sl_sagaw = cap_sagaw(iommu->cap);
+
+	/* Second level only. */
+	if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+		return sl_sagaw;
+
+	/* First level only. */
+	if (!ecap_slts(iommu->ecap))
+		return fl_sagaw;
+
+	return fl_sagaw & sl_sagaw;
+}
+
 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
 {
 	unsigned long sagaw;
 	int agaw;
 
-	sagaw = cap_sagaw(iommu->cap);
-	for (agaw = width_to_agaw(max_gaw);
-	     agaw >= 0; agaw--) {
+	sagaw = __iommu_calculate_sagaw(iommu);
+	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
 		if (test_bit(agaw, &sagaw))
 			break;
 	}
@@ -3123,13 +3145,7 @@ static int __init init_dmars(void)
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 		if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
-			/*
-			 * Call dmar_alloc_hwirq() with dmar_global_lock held,
-			 * could cause possible lock race condition.
-			 */
-			up_write(&dmar_global_lock);
 			ret = intel_svm_enable_prq(iommu);
-			down_write(&dmar_global_lock);
 			if (ret)
 				goto free_iommu;
 		}
@@ -4035,7 +4051,6 @@ int __init intel_iommu_init(void)
 	force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
 		    platform_optin_force_iommu();
 
-	down_write(&dmar_global_lock);
 	if (dmar_table_init()) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR table\n");
@@ -4048,16 +4063,6 @@ int __init intel_iommu_init(void)
 		goto out_free_dmar;
 	}
 
-	up_write(&dmar_global_lock);
-
-	/*
-	 * The bus notifier takes the dmar_global_lock, so lockdep will
-	 * complain later when we register it under the lock.
-	 */
-	dmar_register_bus_notifier();
-
-	down_write(&dmar_global_lock);
-
 	if (!no_iommu)
 		intel_iommu_debugfs_init();
 
@@ -4105,11 +4110,9 @@ int __init intel_iommu_init(void)
 		pr_err("Initialization failed\n");
 		goto out_free_dmar;
 	}
-	up_write(&dmar_global_lock);
 
 	init_iommu_pm_ops();
 
-	down_read(&dmar_global_lock);
 	for_each_active_iommu(iommu, drhd) {
 		/*
 		 * The flush queue implementation does not perform
@@ -4127,13 +4130,11 @@ int __init intel_iommu_init(void)
 			       "%s", iommu->name);
 		iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
 	}
-	up_read(&dmar_global_lock);
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 	if (si_domain && !hw_pass_through)
 		register_memory_notifier(&intel_iommu_memory_nb);
 
-	down_read(&dmar_global_lock);
 	if (probe_acpi_namespace_devices())
 		pr_warn("ACPI name space devices didn't probe correctly\n");
 
@@ -4144,17 +4145,15 @@ int __init intel_iommu_init(void)
 
 		iommu_disable_protect_mem_regions(iommu);
 	}
-	up_read(&dmar_global_lock);
-
-	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
 	intel_iommu_enabled = 1;
+	dmar_register_bus_notifier();
+	pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
 	return 0;
 
 out_free_dmar:
 	intel_iommu_free_dmars();
-	up_write(&dmar_global_lock);
 	return ret;
 }
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 847ad47a2dfd3..f113833c3075c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3089,6 +3089,24 @@ out:
 	return ret;
 }
 
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+	if (group->domain == group->default_domain)
+		return true;
+
+	/*
+	 * If the default domain was set to identity and it is still an identity
+	 * domain then we consider this a pass. This happens because of
+	 * amd_iommu_init_device() replacing the default idenytity domain with an
+	 * identity domain that has a different configuration for AMDGPU.
+	 */
+	if (group->default_domain &&
+	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+	    group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+		return true;
+	return false;
+}
+
 /**
  * iommu_device_use_default_domain() - Device driver wants to handle device
  * DMA through the kernel DMA API.
@@ -3107,8 +3125,7 @@ int iommu_device_use_default_domain(struct device *dev)
 
 	mutex_lock(&group->mutex);
 	if (group->owner_cnt) {
-		if (group->domain != group->default_domain ||
-		    group->owner) {
+		if (group->owner || !iommu_is_default_domain(group)) {
 			ret = -EBUSY;
 			goto unlock_out;
 		}
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa07..bf340d779c10b 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static bool viommu_capable(enum iommu_cap cap)
+{
+	switch (cap) {
+	case IOMMU_CAP_CACHE_COHERENCY:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static struct iommu_ops viommu_ops = {
+	.capable = viommu_capable,
 	.domain_alloc = viommu_domain_alloc,
 	.probe_device = viommu_probe_device,
 	.probe_finalize = viommu_probe_finalize,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 91e7e80fce489..25d18b67a1620 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5647,6 +5647,7 @@ static int md_alloc(dev_t dev, char *name)
 	 * removed (mddev_delayed_delete).
 	 */
 	flush_workqueue(md_misc_wq);
+	flush_workqueue(md_rdev_misc_wq);
 
 	mutex_lock(&disks_mutex);
 	mddev = mddev_alloc(dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
|
||
|
index 6ba4c83fe5fc0..bff0bfd10e235 100644
|
||
|
--- a/drivers/net/bonding/bond_main.c
|
||
|
+++ b/drivers/net/bonding/bond_main.c
|
||
|
@@ -1974,6 +1974,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
|
||
|
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
|
||
|
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
|
||
|
|
||
|
+ new_slave->last_tx = new_slave->last_rx;
|
||
|
+
|
||
|
if (bond->params.miimon && !bond->params.use_carrier) {
|
||
|
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
|
||
|
|
||
|
@@ -2857,8 +2859,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
- if (bond_handle_vlan(slave, tags, skb))
|
||
|
+ if (bond_handle_vlan(slave, tags, skb)) {
|
||
|
+ slave_update_last_tx(slave);
|
||
|
arp_xmit(skb);
|
||
|
+ }
|
||
|
+
|
||
|
return;
|
||
|
}
|
||
|
|
||
|
@@ -3047,8 +3052,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
|
||
|
curr_active_slave->last_link_up))
|
||
|
bond_validate_arp(bond, slave, tip, sip);
|
||
|
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
|
||
|
- bond_time_in_interval(bond,
|
||
|
- dev_trans_start(curr_arp_slave->dev), 1))
|
||
|
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
|
||
|
bond_validate_arp(bond, slave, sip, tip);
|
||
|
|
||
|
out_unlock:
|
||
|
@@ -3076,8 +3080,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
|
||
|
}
|
||
|
|
||
|
addrconf_addr_solict_mult(daddr, &mcaddr);
|
||
|
- if (bond_handle_vlan(slave, tags, skb))
|
||
|
+ if (bond_handle_vlan(slave, tags, skb)) {
|
||
|
+ slave_update_last_tx(slave);
|
||
|
ndisc_send_skb(skb, &mcaddr, saddr);
|
||
|
+ }
|
||
|
}
|
||
|
|
||
|
static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
|
||
|
@@ -3134,6 +3140,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
|
||
|
found:
|
||
|
if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
|
||
|
bond_ns_send(slave, &targets[i], &saddr, tags);
|
||
|
+ else
|
||
|
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
|
||
|
+
|
||
|
dst_release(dst);
|
||
|
kfree(tags);
|
||
|
}
|
||
|
@@ -3165,12 +3174,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
|
||
|
return ret;
|
||
|
}
|
||
|
|
||
|
-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
|
||
|
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
|
||
|
struct in6_addr *saddr, struct in6_addr *daddr)
|
||
|
{
|
||
|
int i;
|
||
|
|
||
|
- if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
|
||
|
+ /* Ignore NAs that:
|
||
|
+ * 1. Source address is unspecified address.
|
||
|
+ * 2. Dest address is neither all-nodes multicast address nor
|
||
|
+ * exist on bond interface.
|
||
|
+ */
|
||
|
+ if (ipv6_addr_any(saddr) ||
|
||
|
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
|
||
|
+ !bond_has_this_ip6(bond, daddr))) {
|
||
|
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
|
||
|
__func__, saddr, daddr);
|
||
|
return;
|
||
|
@@ -3213,15 +3229,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
|
||
|
* see bond_arp_rcv().
|
||
|
*/
|
||
|
if (bond_is_active_slave(slave))
|
||
|
- bond_validate_ns(bond, slave, saddr, daddr);
|
||
|
+ bond_validate_na(bond, slave, saddr, daddr);
|
||
|
else if (curr_active_slave &&
|
||
|
time_after(slave_last_rx(bond, curr_active_slave),
|
||
|
curr_active_slave->last_link_up))
|
||
|
- bond_validate_ns(bond, slave, saddr, daddr);
|
||
|
+ bond_validate_na(bond, slave, saddr, daddr);
|
||
|
else if (curr_arp_slave &&
|
||
|
- bond_time_in_interval(bond,
|
||
|
- dev_trans_start(curr_arp_slave->dev), 1))
|
||
|
- bond_validate_ns(bond, slave, saddr, daddr);
|
||
|
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
|
||
|
+ bond_validate_na(bond, slave, saddr, daddr);
|
||
|
|
||
|
out:
|
||
|
return RX_HANDLER_ANOTHER;
|
||
|
@@ -3308,12 +3323,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
|
||
|
* so it can wait
|
||
|
*/
|
||
|
bond_for_each_slave_rcu(bond, slave, iter) {
|
||
|
- unsigned long trans_start = dev_trans_start(slave->dev);
|
||
|
+ unsigned long last_tx = slave_last_tx(slave);
|
||
|
|
||
|
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
|
||
|
|
||
|
if (slave->link != BOND_LINK_UP) {
|
||
|
- if (bond_time_in_interval(bond, trans_start, 1) &&
|
||
|
+ if (bond_time_in_interval(bond, last_tx, 1) &&
|
||
|
bond_time_in_interval(bond, slave->last_rx, 1)) {
|
||
|
|
||
|
bond_propose_link_state(slave, BOND_LINK_UP);
|
||
|
@@ -3338,7 +3353,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
|
||
|
* when the source ip is 0, so don't take the link down
|
||
|
* if we don't know our ip yet
|
||
|
*/
|
||
|
- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
|
||
|
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
|
||
|
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
|
||
|
|
||
|
bond_propose_link_state(slave, BOND_LINK_DOWN);
|
||
|
@@ -3404,7 +3419,7 @@ re_arm:
|
||
|
*/
|
||
|
static int bond_ab_arp_inspect(struct bonding *bond)
|
||
|
{
|
||
|
- unsigned long trans_start, last_rx;
|
||
|
+ unsigned long last_tx, last_rx;
|
||
|
struct list_head *iter;
|
||
|
struct slave *slave;
|
||
|
int commit = 0;
|
||
|
@@ -3455,9 +3470,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
|
||
|
* - (more than missed_max*delta since receive AND
|
||
|
* the bond has an IP address)
|
||
|
*/
|
||
|
- trans_start = dev_trans_start(slave->dev);
|
||
|
+ last_tx = slave_last_tx(slave);
|
||
|
if (bond_is_active_slave(slave) &&
|
||
|
- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
|
||
|
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
|
||
|
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
|
||
|
bond_propose_link_state(slave, BOND_LINK_DOWN);
|
||
|
commit++;
|
||
|
@@ -3474,8 +3489,8 @@ static int bond_ab_arp_inspect(struct bonding *bond)
|
||
|
*/
|
||
|
static void bond_ab_arp_commit(struct bonding *bond)
|
||
|
{
|
||
|
- unsigned long trans_start;
|
||
|
struct list_head *iter;
|
||
|
+ unsigned long last_tx;
|
||
|
struct slave *slave;
|
||
|
|
||
|
bond_for_each_slave(bond, slave, iter) {
|
||
|
@@ -3484,10 +3499,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
|
||
|
continue;
|
||
|
|
||
|
case BOND_LINK_UP:
|
||
|
- trans_start = dev_trans_start(slave->dev);
|
||
|
+ last_tx = slave_last_tx(slave);
|
||
|
if (rtnl_dereference(bond->curr_active_slave) != slave ||
|
||
|
(!rtnl_dereference(bond->curr_active_slave) &&
|
||
|
- bond_time_in_interval(bond, trans_start, 1))) {
|
||
|
+ bond_time_in_interval(bond, last_tx, 1))) {
|
||
|
struct slave *current_arp_slave;
|
||
|
|
||
|
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
|
||
|
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
|
||
|
index 6439b56f381f9..517bc3922ee24 100644
|
||
|
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
|
||
|
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
|
||
|
@@ -16,11 +16,13 @@
|
||
|
#include <linux/iopoll.h>
|
||
|
#include <linux/mdio.h>
|
||
|
#include <linux/pci.h>
|
||
|
+#include <linux/time.h>
|
||
|
#include "felix.h"
|
||
|
|
||
|
#define VSC9959_NUM_PORTS 6
|
||
|
|
||
|
#define VSC9959_TAS_GCL_ENTRY_MAX 63
|
||
|
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
|
||
|
#define VSC9959_VCAP_POLICER_BASE 63
|
||
|
#define VSC9959_VCAP_POLICER_MAX 383
|
||
|
#define VSC9959_SWITCH_PCI_BAR 4
|
||
|
@@ -1410,6 +1412,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
|
||
|
mdiobus_free(felix->imdio);
|
||
|
}
|
||
|
|
||
|
+/* The switch considers any frame (regardless of size) as eligible for
|
||
|
+ * transmission if the traffic class gate is open for at least 33 ns.
|
||
|
+ * Overruns are prevented by cropping an interval at the end of the gate time
|
||
|
+ * slot for which egress scheduling is blocked, but we need to still keep 33 ns
|
||
|
+ * available for one packet to be transmitted, otherwise the port tc will hang.
|
||
|
+ * This function returns the size of a gate interval that remains available for
|
||
|
+ * setting the guard band, after reserving the space for one egress frame.
|
||
|
+ */
|
||
|
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
|
||
|
+{
|
||
|
+ /* Gate always open */
|
||
|
+ if (gate_len_ns == U64_MAX)
|
||
|
+ return U64_MAX;
|
||
|
+
|
||
|
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
|
||
|
+}
|
||
|
+
|
||
|
/* Extract shortest continuous gate open intervals in ns for each traffic class
|
||
|
* of a cyclic tc-taprio schedule. If a gate is always open, the duration is
|
||
|
* considered U64_MAX. If the gate is always closed, it is considered 0.
|
||
|
@@ -1471,6 +1490,65 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
|
||
|
min_gate_len[tc] = 0;
|
||
|
}
|
||
|
|
||
|
+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ,
|
||
|
+ * so we need to spell out the register access to each traffic class in helper
|
||
|
+ * functions, to simplify callers
|
||
|
+ */
|
||
|
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
|
||
|
+ u32 max_sdu)
|
||
|
+{
|
||
|
+ switch (tc) {
|
||
|
+ case 0:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 1:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 2:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 3:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 4:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 5:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 6:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ case 7:
|
||
|
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
|
||
|
+ port);
|
||
|
+ break;
|
||
|
+ }
|
||
|
+}
|
||
|
+
|
||
|
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
|
||
|
+{
|
||
|
+ switch (tc) {
|
||
|
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
|
||
|
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
|
||
|
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
|
||
|
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
|
||
|
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
|
||
|
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
|
||
|
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
|
||
|
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
|
||
|
+ default:
|
||
|
+ return 0;
|
||
|
+ }
|
||
|
+}
|
||
|
+
|
||
|
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
|
||
|
* switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
|
||
|
* values (the default value is 1518). Also, for traffic class windows smaller
|
||
|
@@ -1527,11 +1605,16 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
|
||
|
|
||
|
vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
|
||
|
|
||
|
+ mutex_lock(&ocelot->fwd_domain_lock);
|
||
|
+
|
||
|
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
|
||
|
+ u64 remaining_gate_len_ps;
|
||
|
u32 max_sdu;
|
||
|
|
||
|
- if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
|
||
|
- min_gate_len[tc] * 1000 > needed_bit_time_ps) {
|
||
|
+ remaining_gate_len_ps =
|
||
|
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
|
||
|
+
|
||
|
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
|
||
|
/* Setting QMAXSDU_CFG to 0 disables oversized frame
|
||
|
* dropping.
|
||
|
*/
|
||
|
@@ -1544,9 +1627,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
|
||
|
/* If traffic class doesn't support a full MTU sized
|
||
|
* frame, make sure to enable oversize frame dropping
|
||
|
* for frames larger than the smallest that would fit.
|
||
|
+ *
|
||
|
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
|
||
|
+ * controls not only oversized frame dropping, but also
|
||
|
+ * per-tc static guard band lengths, so it reduces the
|
||
|
+ * useful gate interval length. Therefore, be careful
|
||
|
+ * to calculate a guard band (and therefore max_sdu)
|
||
|
+ * that still leaves 33 ns available in the time slot.
|
||
|
*/
|
||
|
- max_sdu = div_u64(min_gate_len[tc] * 1000,
|
||
|
- picos_per_byte);
|
||
|
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
|
||
|
/* A TC gate may be completely closed, which is a
|
||
|
* special case where all packets are oversized.
|
||
|
* Any limit smaller than 64 octets accomplishes this
|
||
|
@@ -1569,47 +1658,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
|
||
|
max_sdu);
|
||
|
}
|
||
|
|
||
|
- /* ocelot_write_rix is a macro that concatenates
|
||
|
- * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
|
||
|
- * the writes to each traffic class
|
||
|
- */
|
||
|
- switch (tc) {
|
||
|
- case 0:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 1:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 2:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 3:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 4:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 5:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 6:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
|
||
|
- port);
|
||
|
- break;
|
||
|
- case 7:
|
||
|
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
|
||
|
- port);
|
||
|
- break;
|
||
|
- }
|
||
|
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
|
||
|
}
|
||
|
|
||
|
ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
|
||
|
+
|
||
|
+ ocelot->ops->cut_through_fwd(ocelot);
|
||
|
+
|
||
|
+ mutex_unlock(&ocelot->fwd_domain_lock);
|
||
|
}
|
||
|
|
||
|
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
|
||
|
@@ -1636,13 +1692,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
|
||
|
break;
|
||
|
}
|
||
|
|
||
|
+ mutex_lock(&ocelot->tas_lock);
|
||
|
+
|
||
|
ocelot_rmw_rix(ocelot,
|
||
|
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
|
||
|
QSYS_TAG_CONFIG_LINK_SPEED_M,
|
||
|
QSYS_TAG_CONFIG, port);
|
||
|
|
||
|
- mutex_lock(&ocelot->tas_lock);
|
||
|
-
|
||
|
if (ocelot_port->taprio)
|
||
|
vsc9959_tas_guard_bands_update(ocelot, port);
|
||
|
|
||
|
@@ -2709,7 +2765,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
|
||
|
{
|
||
|
struct felix *felix = ocelot_to_felix(ocelot);
|
||
|
struct dsa_switch *ds = felix->ds;
|
||
|
- int port, other_port;
|
||
|
+ int tc, port, other_port;
|
||
|
|
||
|
lockdep_assert_held(&ocelot->fwd_domain_lock);
|
||
|
|
||
|
@@ -2753,19 +2809,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
|
||
|
min_speed = other_ocelot_port->speed;
|
||
|
}
|
||
|
|
||
|
- /* Enable cut-through forwarding for all traffic classes. */
|
||
|
- if (ocelot_port->speed == min_speed)
|
||
|
+ /* Enable cut-through forwarding for all traffic classes that
|
||
|
+ * don't have oversized dropping enabled, since this check is
|
||
|
+ * bypassed in cut-through mode.
|
||
|
+ */
|
||
|
+ if (ocelot_port->speed == min_speed) {
|
||
|
val = GENMASK(7, 0);
|
||
|
|
||
|
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
|
||
|
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
|
||
|
+ val &= ~BIT(tc);
|
||
|
+ }
|
||
|
+
|
||
|
set:
|
||
|
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
|
||
|
if (tmp == val)
|
||
|
continue;
|
||
|
|
||
|
dev_dbg(ocelot->dev,
|
||
|
- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
|
||
|
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
|
||
|
port, mask, ocelot_port->speed, min_speed,
|
||
|
- val ? "enabling" : "disabling");
|
||
|
+ val ? "enabling" : "disabling", val);
|
||
|
|
||
|
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
|
||
|
}
|
||
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
|
||
|
index 407fe8f340a06..c5b61bc80f783 100644
|
||
|
--- a/drivers/net/ethernet/intel/i40e/i40e.h
|
||
|
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
|
||
|
@@ -1291,4 +1291,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
|
||
|
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
|
||
|
struct i40e_cloud_filter *filter,
|
||
|
bool add);
|
||
|
+
|
||
|
+/**
|
||
|
+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
|
||
|
+ * @pf: pointer to a pf.
|
||
|
+ *
|
||
|
+ * Check and return value of flag I40E_FLAG_TC_MQPRIO.
|
||
|
+ *
|
||
|
+ * Return: I40E_FLAG_TC_MQPRIO set state.
|
||
|
+ **/
|
||
|
+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
|
||
|
+{
|
||
|
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
|
||
|
+}
|
||
|
+
|
||
|
#endif /* _I40E_H_ */
|
||
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
|
||
|
index ea2bb0140a6eb..10d7a982a5b9b 100644
|
||
|
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
|
||
|
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
|
||
|
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
|
||
|
"Cannot locate client instance close routine\n");
|
||
|
return;
|
||
|
}
|
||
|
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
|
||
|
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
|
||
|
+ return;
|
||
|
+ }
|
||
|
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
|
||
|
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
|
||
|
i40e_client_release_qvlist(&cdev->lan_info);
|
||
|
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
|
||
|
/* Remove failed client instance */
|
||
|
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
|
||
|
&cdev->state);
|
||
|
- i40e_client_del_instance(pf);
|
||
|
return;
|
||
|
}
|
||
|
}
|
||
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 22a61802a4027..ed9984f1e1b9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4931,7 +4931,7 @@ static int i40e_set_channels(struct net_device *dev,
 	/* We do not support setting channels via ethtool when TCs are
 	 * configured through mqprio
 	 */
-	if (pf->flags & I40E_FLAG_TC_MQPRIO)
+	if (i40e_is_tc_mqprio_enabled(pf))
 		return -EINVAL;
 
 	/* verify they are not requesting separate vectors */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
|
||
|
index 71a8e1698ed48..1aaf0c5ddf6cf 100644
|
||
|
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
|
||
|
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
|
||
|
@@ -5339,7 +5339,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
|
||
|
u8 num_tc = 0;
|
||
|
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
|
||
|
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf))
|
||
|
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
|
||
|
|
||
|
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
|
||
|
@@ -5371,7 +5371,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
|
||
|
**/
|
||
|
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
|
||
|
{
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf))
|
||
|
return i40e_mqprio_get_enabled_tc(pf);
|
||
|
|
||
|
/* If neither MQPRIO nor DCB is enabled for this PF then just return
|
||
|
@@ -5468,7 +5468,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
|
||
|
int i;
|
||
|
|
||
|
/* There is no need to reset BW when mqprio mode is on. */
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf))
|
||
|
return 0;
|
||
|
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
|
||
|
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
|
||
|
@@ -5540,7 +5540,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
|
||
|
vsi->tc_config.tc_info[i].qoffset);
|
||
|
}
|
||
|
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf))
|
||
|
return;
|
||
|
|
||
|
/* Assign UP2TC map for the VSI */
|
||
|
@@ -5701,7 +5701,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
|
||
|
ctxt.vf_num = 0;
|
||
|
ctxt.uplink_seid = vsi->uplink_seid;
|
||
|
ctxt.info = vsi->info;
|
||
|
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf)) {
|
||
|
ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
|
||
|
if (ret)
|
||
|
goto out;
|
||
|
@@ -6425,7 +6425,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
|
||
|
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
|
||
|
|
||
|
if (vsi->type == I40E_VSI_MAIN) {
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf))
|
||
|
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
|
||
|
else
|
||
|
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
|
||
|
@@ -6536,6 +6536,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
|
||
|
vsi->tc_seid_map[i] = ch->seid;
|
||
|
}
|
||
|
}
|
||
|
+
|
||
|
+ /* reset to reconfigure TX queue contexts */
|
||
|
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
|
||
|
return ret;
|
||
|
|
||
|
err_free:
|
||
|
@@ -7819,7 +7822,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
|
||
|
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
|
||
|
return ERR_PTR(-EINVAL);
|
||
|
}
|
||
|
- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf)) {
|
||
|
netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
|
||
|
return ERR_PTR(-EINVAL);
|
||
|
}
|
||
|
@@ -8072,7 +8075,7 @@ config_tc:
|
||
|
/* Quiesce VSI queues */
|
||
|
i40e_quiesce_vsi(vsi);
|
||
|
|
||
|
- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
|
||
|
+ if (!hw && !i40e_is_tc_mqprio_enabled(pf))
|
||
|
i40e_remove_queue_channels(vsi);
|
||
|
|
||
|
/* Configure VSI for enabled TCs */
|
||
|
@@ -8096,7 +8099,7 @@ config_tc:
|
||
|
"Setup channel (id:%u) utilizing num_queues %d\n",
|
||
|
vsi->seid, vsi->tc_config.tc_info[0].qcount);
|
||
|
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf)) {
|
||
|
if (vsi->mqprio_qopt.max_rate[0]) {
|
||
|
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
|
||
|
|
||
|
@@ -10750,7 +10753,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
|
||
|
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
|
||
|
* is not supported with new link speed
|
||
|
*/
|
||
|
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
|
||
|
+ if (i40e_is_tc_mqprio_enabled(pf)) {
|
||
|
i40e_aq_set_dcb_parameters(hw, false, NULL);
|
||
|
} else {
|
||
|
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
|
||
|
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
|
||
|
index af69ccc6e8d2f..07f1e209d524d 100644
|
||
|
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
|
||
|
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
|
||
|
@@ -3689,7 +3689,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
|
||
|
u8 prio;
|
||
|
|
||
|
/* is DCB enabled at all? */
|
||
|
- if (vsi->tc_config.numtc == 1)
|
||
|
+ if (vsi->tc_config.numtc == 1 ||
|
||
|
+ i40e_is_tc_mqprio_enabled(vsi->back))
|
||
|
return netdev_pick_tx(netdev, skb, sb_dev);
|
||
|
|
||
|
prio = skb->priority;
|
||
|
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
|
||
|
index 6d159334da9ec..981c43b204ff4 100644
|
||
|
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
|
||
|
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
|
||
|
@@ -2789,6 +2789,11 @@ static void iavf_reset_task(struct work_struct *work)
|
||
|
int i = 0, err;
|
||
|
bool running;
|
||
|
|
||
|
+ /* Detach interface to avoid subsequent NDO callbacks */
|
||
|
+ rtnl_lock();
|
||
|
+ netif_device_detach(netdev);
|
||
|
+ rtnl_unlock();
|
||
|
+
|
||
|
/* When device is being removed it doesn't make sense to run the reset
|
||
|
* task, just return in such a case.
|
||
|
*/
|
||
|
@@ -2796,7 +2801,7 @@ static void iavf_reset_task(struct work_struct *work)
|
||
|
if (adapter->state != __IAVF_REMOVE)
|
||
|
queue_work(iavf_wq, &adapter->reset_task);
|
||
|
|
||
|
- return;
|
||
|
+ goto reset_finish;
|
||
|
}
|
||
|
|
||
|
while (!mutex_trylock(&adapter->client_lock))
|
||
|
@@ -2866,7 +2871,6 @@ continue_reset:
|
||
|
|
||
|
if (running) {
|
||
|
netif_carrier_off(netdev);
|
||
|
- netif_tx_stop_all_queues(netdev);
|
||
|
adapter->link_up = false;
|
||
|
iavf_napi_disable_all(adapter);
|
||
|
}
|
||
|
@@ -2996,7 +3000,7 @@ continue_reset:
|
||
|
mutex_unlock(&adapter->client_lock);
|
||
|
mutex_unlock(&adapter->crit_lock);
|
||
|
|
||
|
- return;
|
||
|
+ goto reset_finish;
|
||
|
reset_err:
|
||
|
if (running) {
|
||
|
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
|
||
|
@@ -3007,6 +3011,10 @@ reset_err:
|
||
|
mutex_unlock(&adapter->client_lock);
|
||
|
mutex_unlock(&adapter->crit_lock);
|
||
|
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
|
||
|
+reset_finish:
|
||
|
+ rtnl_lock();
|
||
|
+ netif_device_attach(netdev);
|
||
|
+ rtnl_unlock();
|
||
|
}
|
||
|
|
||
|
/**
|
||
|
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
|
||
|
index 136d7911adb48..1e32438081780 100644
|
||
|
--- a/drivers/net/ethernet/intel/ice/ice_base.c
|
||
|
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
|
||
|
@@ -7,18 +7,6 @@
|
||
|
#include "ice_dcb_lib.h"
|
||
|
#include "ice_sriov.h"
|
||
|
|
||
|
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
|
||
|
-{
|
||
|
- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
|
||
|
- return !!rx_ring->xdp_buf;
|
||
|
-}
|
||
|
-
|
||
|
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
|
||
|
-{
|
||
|
- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
|
||
|
- return !!rx_ring->rx_buf;
|
||
|
-}
|
||
|
-
|
||
|
/**
|
||
|
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
|
||
|
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
|
||
|
@@ -519,11 +507,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
|
||
|
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
|
||
|
ring->q_index, ring->q_vector->napi.napi_id);
|
||
|
|
||
|
- kfree(ring->rx_buf);
|
||
|
ring->xsk_pool = ice_xsk_pool(ring);
|
||
|
if (ring->xsk_pool) {
|
||
|
- if (!ice_alloc_rx_buf_zc(ring))
|
||
|
- return -ENOMEM;
|
||
|
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
|
||
|
|
||
|
ring->rx_buf_len =
|
||
|
@@ -538,8 +523,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
|
||
|
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
|
||
|
ring->q_index);
|
||
|
} else {
|
||
|
- if (!ice_alloc_rx_buf(ring))
|
||
|
- return -ENOMEM;
|
||
|
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
|
||
|
/* coverity[check_return] */
|
||
|
xdp_rxq_info_reg(&ring->xdp_rxq,
|
||
|
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
|
||
|
index 3d45e075204e3..4c6bb7482b362 100644
|
||
|
--- a/drivers/net/ethernet/intel/ice/ice_main.c
|
||
|
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
|
||
|
@@ -2898,10 +2898,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
|
||
|
if (xdp_ring_err)
|
||
|
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
|
||
|
}
|
||
|
+ /* reallocate Rx queues that are used for zero-copy */
|
||
|
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
|
||
|
+ if (xdp_ring_err)
|
||
|
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
|
||
|
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
|
||
|
xdp_ring_err = ice_destroy_xdp_rings(vsi);
|
||
|
if (xdp_ring_err)
|
||
|
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
|
||
|
+ /* reallocate Rx queues that were used for zero-copy */
|
||
|
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
|
||
|
+ if (xdp_ring_err)
|
||
|
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
|
||
|
} else {
|
||
|
/* safe to call even when prog == vsi->xdp_prog as
|
||
|
* dev_xdp_install in net/core/dev.c incremented prog's
|
||
|
@@ -3904,7 +3912,7 @@ static int ice_init_pf(struct ice_pf *pf)
|
||
|
|
||
|
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
|
||
|
if (!pf->avail_rxqs) {
|
||
|
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
|
||
|
+ bitmap_free(pf->avail_txqs);
|
||
|
pf->avail_txqs = NULL;
|
||
|
return -ENOMEM;
|
||
|
}
|
||
|
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
|
||
|
index e48e29258450f..03ce85f6e6df8 100644
|
||
|
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
|
||
|
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
|
||
|
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
|
||
|
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
|
||
|
if (err)
|
||
|
return err;
|
||
|
+ ice_clean_rx_ring(rx_ring);
|
||
|
|
||
|
ice_qvec_toggle_napi(vsi, q_vector, false);
|
||
|
ice_qp_clean_rings(vsi, q_idx);
|
||
|
@@ -316,6 +317,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
|
||
|
return 0;
|
||
|
}
|
||
|
|
||
|
+/**
|
||
|
+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
|
||
|
+ * @rx_ring: Rx ring
|
||
|
+ * @pool_present: is pool for XSK present
|
||
|
+ *
|
||
|
+ * Try allocating memory and return ENOMEM, if failed to allocate.
|
||
|
+ * If allocation was successful, substitute buffer with allocated one.
|
||
|
+ * Returns 0 on success, negative on failure
|
||
|
+ */
|
||
|
+static int
|
||
|
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
|
||
|
+{
|
||
|
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
|
||
|
+ sizeof(*rx_ring->rx_buf);
|
||
|
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
|
||
|
+
|
||
|
+ if (!sw_ring)
|
||
|
+ return -ENOMEM;
|
||
|
+
|
||
|
+ if (pool_present) {
|
||
|
+ kfree(rx_ring->rx_buf);
|
||
|
+ rx_ring->rx_buf = NULL;
|
||
|
+ rx_ring->xdp_buf = sw_ring;
|
||
|
+ } else {
|
||
|
+ kfree(rx_ring->xdp_buf);
|
||
|
+ rx_ring->xdp_buf = NULL;
|
||
|
+ rx_ring->rx_buf = sw_ring;
|
||
|
+ }
|
||
|
+
|
||
|
+ return 0;
|
||
|
+}
|
||
|
+
|
||
|
+/**
|
||
|
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
|
||
|
+ * @vsi: Current VSI
|
||
|
+ * @zc: is zero copy set
|
||
|
+ *
|
||
|
+ * Reallocate buffer for rx_rings that might be used by XSK.
|
||
|
+ * XDP requires more memory, than rx_buf provides.
|
||
|
+ * Returns 0 on success, negative on failure
|
||
|
+ */
|
||
|
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
|
||
|
+{
|
||
|
+ struct ice_rx_ring *rx_ring;
|
||
|
+ unsigned long q;
|
||
|
+
|
||
|
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
|
||
|
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
|
||
|
+ rx_ring = vsi->rx_rings[q];
|
||
|
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
|
||
|
+ return -ENOMEM;
|
||
|
+ }
|
||
|
+
|
||
|
+ return 0;
|
||
|
+}
|
||
|
+
|
||
|
/**
|
||
|
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
|
||
|
* @vsi: Current VSI
|
||
|
@@ -345,11 +402,17 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
|
||
|
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
|
||
|
|
||
|
if (if_running) {
|
||
|
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
|
||
|
+
|
||
|
ret = ice_qp_dis(vsi, qid);
|
||
|
if (ret) {
|
||
|
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
|
||
|
goto xsk_pool_if_up;
|
||
|
}
|
||
|
+
|
||
|
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
|
||
|
+ if (ret)
|
||
|
+ goto xsk_pool_if_up;
|
||
|
}
|
||
|
|
||
|
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
|
||
|
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
|
||
|
index 21faec8e97db1..4edbe81eb6460 100644
|
||
|
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
|
||
|
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
|
||
|
@@ -27,6 +27,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
|
||
|
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
|
||
|
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
|
||
|
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
|
||
|
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
|
||
|
#else
|
||
|
static inline bool
|
||
|
ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
|
||
|
@@ -72,5 +73,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
|
||
|
|
||
|
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
|
||
|
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
|
||
|
+
|
||
|
+static inline int
|
||
|
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
|
||
|
+ bool __always_unused zc)
|
||
|
+{
|
||
|
+ return 0;
|
||
|
+}
|
||
|
#endif /* CONFIG_XDP_SOCKETS */
|
||
|
#endif /* !_ICE_XSK_H_ */
|
||
|
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||
|
index dab8f3f771f84..cfe804bc8d205 100644
|
||
|
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||
|
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
|
||
|
@@ -412,7 +412,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
|
||
|
if (entry->hash != 0xffff) {
|
||
|
ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
|
||
|
ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
|
||
|
- MTK_FOE_STATE_BIND);
|
||
|
+ MTK_FOE_STATE_UNBIND);
|
||
|
dma_wmb();
|
||
|
}
|
||
|
entry->hash = 0xffff;
|
||
|
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
|
||
|
index 1f5cf1c9a9475..69ffce04d6306 100644
|
||
|
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
|
||
|
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
|
||
|
@@ -293,6 +293,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
|
||
|
if (!ppe)
|
||
|
return;
|
||
|
|
||
|
+ if (hash > MTK_PPE_HASH_MASK)
|
||
|
+ return;
|
||
|
+
|
||
|
now = (u16)jiffies;
|
||
|
diff = now - ppe->foe_check_time[hash];
|
||
|
if (diff < HZ / 10)
|
||
|
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
|
||
|
index 73f7962a37d33..c49062ad72c6c 100644
|
||
|
--- a/drivers/net/phy/meson-gxl.c
|
||
|
+++ b/drivers/net/phy/meson-gxl.c
|
||
|
@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
|
||
|
irq_status == INTSRC_ENERGY_DETECT)
|
||
|
return IRQ_HANDLED;
|
||
|
|
||
|
- /* Give PHY some time before MAC starts sending data. This works
|
||
|
- * around an issue where network doesn't come up properly.
|
||
|
- */
|
||
|
- if (!(irq_status & INTSRC_LINK_DOWN))
|
||
|
- phy_queue_state_machine(phydev, msecs_to_jiffies(100));
|
||
|
- else
|
||
|
- phy_trigger_machine(phydev);
|
||
|
+ phy_trigger_machine(phydev);
|
||
|
|
||
|
return IRQ_HANDLED;
|
||
|
}
|
||
|
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
|
||
|
index d4c93d59bc539..8569a545e0a3f 100644
|
||
|
--- a/drivers/net/phy/microchip_t1.c
|
||
|
+++ b/drivers/net/phy/microchip_t1.c
|
||
|
@@ -28,12 +28,16 @@
|
||
|
|
||
|
/* Interrupt Source Register */
|
||
|
#define LAN87XX_INTERRUPT_SOURCE (0x18)
|
||
|
+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08)
|
||
|
|
||
|
/* Interrupt Mask Register */
|
||
|
#define LAN87XX_INTERRUPT_MASK (0x19)
|
||
|
#define LAN87XX_MASK_LINK_UP (0x0004)
|
||
|
#define LAN87XX_MASK_LINK_DOWN (0x0002)
|
||
|
|
||
|
+#define LAN87XX_INTERRUPT_MASK_2 (0x09)
|
||
|
+#define LAN87XX_MASK_COMM_RDY BIT(10)
|
||
|
+
|
||
|
/* MISC Control 1 Register */
|
||
|
#define LAN87XX_CTRL_1 (0x11)
|
||
|
#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
|
||
|
@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
|
||
|
int rc, val = 0;
|
||
|
|
||
|
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
|
||
|
- /* unmask all source and clear them before enable */
|
||
|
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
|
||
|
+ /* clear all interrupt */
|
||
|
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
|
||
|
- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
|
||
|
+ PHYACC_ATTR_BANK_MISC,
|
||
|
+ LAN87XX_INTERRUPT_MASK_2, val);
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
|
||
|
+ PHYACC_ATTR_BANK_MISC,
|
||
|
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ /* enable link down and comm ready interrupt */
|
||
|
+ val = LAN87XX_MASK_LINK_DOWN;
|
||
|
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ val = LAN87XX_MASK_COMM_RDY;
|
||
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
|
||
|
+ PHYACC_ATTR_BANK_MISC,
|
||
|
+ LAN87XX_INTERRUPT_MASK_2, val);
|
||
|
} else {
|
||
|
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
|
||
|
- if (rc)
|
||
|
+ if (rc < 0)
|
||
|
return rc;
|
||
|
|
||
|
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
|
||
|
+ PHYACC_ATTR_BANK_MISC,
|
||
|
+ LAN87XX_INTERRUPT_MASK_2, val);
|
||
|
+ if (rc < 0)
|
||
|
+ return rc;
|
||
|
+
|
||
|
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
|
||
|
+ PHYACC_ATTR_BANK_MISC,
|
||
|
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
|
||
|
}
|
||
|
|
||
|
return rc < 0 ? rc : 0;
|
||
|
@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
|
||
|
{
|
||
|
int irq_status;
|
||
|
|
||
|
+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
|
||
|
+ PHYACC_ATTR_BANK_MISC,
|
||
|
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
|
||
|
+ if (irq_status < 0) {
|
||
|
+ phy_error(phydev);
|
||
|
+ return IRQ_NONE;
|
||
|
+ }
|
||
|
+
|
||
|
irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
|
||
|
if (irq_status < 0) {
|
||
|
phy_error(phydev);
|
||
|
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
|
||
|
index c62f299b9e0a8..d8a5dbf89a021 100644
|
||
|
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
|
||
|
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
|
||
|
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||
|
/* Repeat initial/next rate.
|
||
|
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
|
||
|
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
|
||
|
- while (repeat_rate > 0) {
|
||
|
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
|
||
|
if (is_legacy(tbl_type.lq_type)) {
|
||
|
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
|
||
|
ant_toggle_cnt++;
|
||
|
@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||
|
cpu_to_le32(new_rate);
|
||
|
repeat_rate--;
|
||
|
idx++;
|
||
|
- if (idx >= LINK_QUAL_MAX_RETRY_NUM)
|
||
|
- goto out;
|
||
|
}
|
||
|
|
||
|
il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
|
||
|
@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
|
||
|
repeat_rate--;
|
||
|
}
|
||
|
|
||
|
-out:
|
||
|
lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
|
||
|
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
|
||
|
|
||
|
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index b0f58bcf70cb0..106c88b723b90 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -345,7 +345,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
 
 	err = mt7921e_driver_own(dev);
 	if (err)
-		return err;
+		goto out;
 
 	err = mt7921_run_firmware(dev);
 	if (err)
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index a067274c20144..bf001e9def6aa 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -254,6 +254,7 @@ struct wilc {
u8 *rx_buffer;
u32 rx_buffer_offset;
u8 *tx_buffer;
+ u32 *vmm_table;

struct txq_handle txq[NQUEUES];
int txq_entries;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 7962c11cfe848..56f924a31bc66 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -27,6 +27,7 @@ struct wilc_sdio {
bool irq_gpio;
u32 block_size;
int has_thrpt_enh3;
+ u8 *cmd53_buf;
};

struct sdio_cmd52 {
@@ -46,6 +47,7 @@ struct sdio_cmd53 {
u32 count: 9;
u8 *buffer;
u32 block_size;
+ bool use_global_buf;
};

static const struct wilc_hif_func wilc_hif_sdio;
@@ -90,6 +92,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+ u8 *buf = cmd->buffer;

sdio_claim_host(func);

@@ -100,12 +104,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
else
size = cmd->count;

+ if (cmd->use_global_buf) {
+ if (size > sizeof(u32))
+ return -EINVAL;
+
+ buf = sdio_priv->cmd53_buf;
+ }
+
if (cmd->read_write) { /* write */
- ret = sdio_memcpy_toio(func, cmd->address,
- (void *)cmd->buffer, size);
+ if (cmd->use_global_buf)
+ memcpy(buf, cmd->buffer, size);
+
+ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
} else { /* read */
- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
- cmd->address, size);
+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+ if (cmd->use_global_buf)
+ memcpy(cmd->buffer, buf, size);
}

sdio_release_host(func);
@@ -127,6 +142,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
if (!sdio_priv)
return -ENOMEM;

+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdio_priv->cmd53_buf) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret)
@@ -160,6 +181,7 @@ dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
free:
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
return ret;
}
@@ -171,6 +193,7 @@ static void wilc_sdio_remove(struct sdio_func *func)

clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}

@@ -367,8 +390,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)&data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret)
@@ -406,6 +430,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;

+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -484,8 +509,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)data;
+ cmd.use_global_buf = true;

cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -527,6 +553,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;

+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 48441f0389ca1..0c8a571486d25 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
int ret = 0;
int counter;
int timeout;
- u32 vmm_table[WILC_VMM_TBL_SIZE];
+ u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
@@ -1251,6 +1251,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);

+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
@@ -1485,6 +1487,14 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}

+ if (!wilc->vmm_table)
+ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
+ goto fail;
+ }
+
if (!wilc->tx_buffer)
wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);

@@ -1509,7 +1519,8 @@ int wilc_wlan_init(struct net_device *dev)
return 0;

fail:
-
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 990360d75cb64..e85b3c5d4acce 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;

xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
struct backend_info *be = dev_get_drvdata(&dev->dev);

unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 7a9e6ffa23429..daa0e160e1212 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
- bool more_requests;

/* recv state */
void *pdu;
@@ -318,7 +317,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
+ !llist_empty(&queue->req_list);
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -337,9 +336,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
*/
if (queue->io_cpu == raw_smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
- queue->more_requests = !last;
nvme_tcp_send_all(queue);
- queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
}

@@ -1227,7 +1224,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
else if (unlikely(result < 0))
return;

- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;

} while (!time_after(jiffies, deadline)); /* quota is exhausted */
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c27660a660d9a..a339719100051 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)

trace_nvmet_req_complete(req);

- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 82b61acf7a72b..1956be87ac5ff 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
struct nvme_id_ns_zns *id_zns;
u64 zsze;
u16 status;
+ u32 mar, mor;

if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+ mor = bdev_max_open_zones(req->ns->bdev);
+ if (!mor)
+ mor = U32_MAX;
+ else
+ mor--;
+ id_zns->mor = cpu_to_le32(mor);
+
+ mar = bdev_max_active_zones(req->ns->bdev);
+ if (!mar)
+ mar = U32_MAX;
+ else
+ mar--;
+ id_zns->mar = cpu_to_le32(mar);

done:
status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 9be007c9420f9..f69ab90b5e22d 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
}
}

-static void __init ccio_init_resources(struct ioc *ioc)
+static int __init ccio_init_resources(struct ioc *ioc)
{
struct resource *res = ioc->mmio_region;
char *name = kmalloc(14, GFP_KERNEL);
-
+ if (unlikely(!name))
+ return -ENOMEM;
snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
+ return 0;
}

static int new_ioc_area(struct resource *res, unsigned long size,
@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev)
return -ENOMEM;
}
ccio_ioc_init(ioc);
- ccio_init_resources(ioc);
+ if (ccio_init_resources(ioc)) {
+ kfree(ioc);
+ return -ENOMEM;
+ }
hppa_dma_ops = &ccio_ops;

hba = kzalloc(sizeof(*hba), GFP_KERNEL);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 231d86d3949c0..1ec5baa673f92 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -467,7 +467,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (!pmu_ctr_list)
return -ENOMEM;

- for (i = 0; i <= nctr; i++) {
+ for (i = 0; i < nctr; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1e54a833f2cf0..a9daaf4d5aaab 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2732,13 +2732,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;

lockdep_assert_held_once(&rdev->mutex.base);

regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }

return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 750dd1e9f2cc7..2ddc431cbd337 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8061,7 +8061,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;

/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -8489,6 +8489,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}

diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 5b5885d9732b6..3e9b2b0099c7a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -5311,7 +5311,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 5e8887fa02c8a..e3b7ebf464244 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);

- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2b2f682883752..62666df1a59eb 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -6935,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)

if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 78edb1ea4748d..f5c876d03c1ad 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -118,7 +118,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}

-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);

@@ -128,7 +128,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}

/**
@@ -658,14 +663,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq)
return bytes;
}

-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
-{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
-}
-
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
struct request *req = scsi_cmd_to_rq(cmd);
@@ -683,14 +680,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}

+/*
+ * When ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -779,8 +783,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_TRANSPORT;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -839,7 +843,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -933,7 +940,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -949,7 +956,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;

@@ -986,7 +992,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index 70ad0f3dce283..286f5d57c0cab 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;

/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;

@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}

writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}

base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;

pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}

of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}

ddr_seq_data = of_id->data;
@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}

ret = brcmstb_init_sram(dn);
of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}

ctrl.pdev = pdev;

ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)

out:
kfree(ctrl.s3_params);
-
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);

return ret;
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 07d52cafbb313..fcec6ed83d5e2 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 85aa86e1338af..5a3809f6a698f 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -333,6 +333,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
}

+ reset_control_assert(domain->reset);
+
/* Enable reset clocks for all devices in the domain */
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
@@ -340,7 +342,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}

- reset_control_assert(domain->reset);
+ /* delays for reset to propagate */
+ udelay(5);

if (domain->bits.pxx) {
/* request the domain to power up */
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 7ebc28709e945..2782a7e0a8719 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -242,7 +242,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
- dev_set_name(domain->power_dev, "%s", data->name);

domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 267342dfa7388..2dcbe166df63e 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */

+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
/* sample LSB (from slave) on leading edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
setsck(spi, cpol);
}
return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */

+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* sample LSB (from slave) on trailing edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
}
return word;
}
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 1175f3a46859f..27295bda3e0bd 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 80d4e0676083a..365489bf4b8c1 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv)
priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer,
obj->package.elements[0].buffer.length,
GFP_KERNEL);
- if (!priv->data_vault)
+ if (ZERO_OR_NULL_PTR(priv->data_vault))
goto out_free;

bin_attr_data_vault.private = priv->data_vault;
@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
goto free_imok;
}

- if (priv->data_vault) {
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
result = sysfs_create_group(&pdev->dev.kobj,
&data_attribute_group);
if (result)
@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
if (priv->data_vault) {
- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
free_uuid:
@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
if (!priv->rel_misc_dev_res)
acpi_thermal_rel_misc_device_remove(priv->adev->handle);

- if (priv->data_vault)
+ if (!ZERO_OR_NULL_PTR(priv->data_vault))
sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group);
sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a51ca56a0ebe7..829da9cb14a86 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -8723,6 +8723,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
+ unsigned long deadline;
+ int32_t remaining;

spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
@@ -8755,9 +8757,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
+ deadline = jiffies + 10 * HZ;
for (retries = 3; retries > 0; --retries) {
+ ret = -ETIMEDOUT;
+ remaining = deadline - jiffies;
+ if (remaining <= 0)
+ break;
ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
+ remaining / HZ, 0, 0, RQF_PM, NULL);
if (!scsi_status_is_check_condition(ret) ||
!scsi_sense_valid(&sshdr) ||
sshdr.sense_key != UNIT_ATTENTION)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c13b9290e3575..d0057d18d2f4a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -557,6 +557,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
pages, NULL, NULL);
if (ret > 0) {
+ int i;
+
+ /*
+ * The zero page is always resident, we don't need to pin it
+ * and it falls into our invalid/reserved test so we don't
+ * unpin in put_pfn(). Unpin all zero pages in the batch here.
+ */
+ for (i = 0 ; i < ret; i++) {
+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+ unpin_user_page(pages[i]);
+ }
+
*pfn = page_to_pfn(pages[0]);
goto done;
}
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 393894af26f84..2b00a9d554fc0 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
err_release_fb:
framebuffer_release(p);
err_disable:
+ pci_disable_device(dp);
err_out:
return rc;
}
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index c2a60b187467e..4d7f63892dcc4 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info)
if (WARN_ON(refcount_read(&info->count)))
return;

+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+ mutex_destroy(&info->bl_curve_mutex);
+#endif
+
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 292fcb0a24fc9..6ff237cee7f87 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1643,14 +1643,14 @@ static int omapfb_do_probe(struct platform_device *pdev,
goto cleanup;
}
fbdev->int_irq = platform_get_irq(pdev, 0);
- if (!fbdev->int_irq) {
+ if (fbdev->int_irq < 0) {
dev_err(&pdev->dev, "unable to get irq\n");
r = ENXIO;
goto cleanup;
}

fbdev->ext_irq = platform_get_irq(pdev, 1);
- if (!fbdev->ext_irq) {
+ if (fbdev->ext_irq < 0) {
dev_err(&pdev->dev, "unable to get irq\n");
r = ENXIO;
goto cleanup;
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index c4210a3964d8b..bbcc5afd15760 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
if (call->error == 0) {
spin_lock(&vnode->lock);
trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
- vnode->locked_at = call->reply_time;
+ vnode->locked_at = call->issue_time;
afs_schedule_lock_extension(vnode);
spin_unlock(&vnode->lock);
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 4943413d9c5f7..7d37f63ef0f09 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -131,7 +131,7 @@ bad:

static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
{
- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
+ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
}

static void xdr_decode_AFSCallBack(const __be32 **_bp,
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a6f25d9e75b52..28bdd0387e5ea 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -137,7 +137,6 @@ struct afs_call {
bool need_attention; /* T if RxRPC poked us */
bool async; /* T if asynchronous */
bool upgrade; /* T to request service upgrade */
- bool have_reply_time; /* T if have got reply_time */
bool intr; /* T if interruptible */
bool unmarshalling_error; /* T if an unmarshalling error occurred */
u16 service_id; /* Actual service ID (after upgrade) */
@@ -151,7 +150,7 @@ struct afs_call {
} __attribute__((packed));
__be64 tmp64;
};
- ktime_t reply_time; /* Time of first reply packet */
+ ktime_t issue_time; /* Time of issue of operation */
};

struct afs_call_type {
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a5434f3e57c68..e3de7fea36435 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -347,6 +347,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
if (call->max_lifespan)
rxrpc_kernel_set_max_life(call->net->socket, rxcall,
call->max_lifespan);
+ call->issue_time = ktime_get_real();

/* send the request */
iov[0].iov_base = call->request;
@@ -497,12 +498,6 @@ static void afs_deliver_to_call(struct afs_call *call)
return;
}

- if (!call->have_reply_time &&
- rxrpc_kernel_get_reply_time(call->net->socket,
- call->rxcall,
- &call->reply_time))
- call->have_reply_time = true;
-
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
if (ret == 0 && call->unmarshalling_error)
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index fdc7d675b4b0c..11571cca86c19 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -232,8 +232,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
struct afs_callback *cb = &scb->callback;
ktime_t cb_expiry;

- cb_expiry = call->reply_time;
- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
+ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
scb->have_cb = true;
*_bp += xdr_size(x);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4d8acd7e63eb5..1bbc810574f22 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1065,8 +1065,6 @@ struct btrfs_fs_info {

spinlock_t zone_active_bgs_lock;
struct list_head zone_active_bgs;
- /* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */
- wait_queue_head_t zone_finish_wait;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
spinlock_t ref_verify_lock;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a2505cfc6bc10..781952c5a5c23 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3173,7 +3173,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
init_waitqueue_head(&fs_info->delayed_iputs_wait);
- init_waitqueue_head(&fs_info->zone_finish_wait);

/* Usable values until the real ones are cached from the superblock */
fs_info->nodesize = 4096;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 61496ecb1e201..f79f8d7cffcf2 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1643,10 +1643,9 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
done_offset = end;

if (done_offset == start) {
- struct btrfs_fs_info *info = inode->root->fs_info;
-
- wait_var_event(&info->zone_finish_wait,
- !test_bit(BTRFS_FS_NEED_ZONE_FINISH, &info->flags));
+ wait_on_bit_io(&inode->root->fs_info->flags,
+ BTRFS_FS_NEED_ZONE_FINISH,
+ TASK_UNINTERRUPTIBLE);
continue;
}

diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index b0c5b4738b1f7..17623e6410c5d 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -199,7 +199,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

if (flags & BTRFS_BLOCK_GROUP_DATA)
- return SZ_1G;
+ return BTRFS_MAX_DATA_CHUNK_SIZE;
else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
return SZ_32M;

diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3460fd6743807..16e01fbdcec83 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5266,6 +5266,9 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
ctl->stripe_size);
}

+ /* Stripe size should not go beyond 1G. */
+ ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
+
/* Align to BTRFS_STRIPE_LEN */
ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
ctl->chunk_size = ctl->stripe_size * data_stripes;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 31cb11daa8e82..1386362fad3b8 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -421,10 +421,19 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
* since btrfs adds the pages one by one to a bio, and btrfs cannot
* increase the metadata reservation even if it increases the number of
* extents, it is safe to stick with the limit.
+ *
+ * With the zoned emulation, we can have non-zoned device on the zoned
+ * mode. In this case, we don't have a valid max zone append size. So,
+ * use max_segments * PAGE_SIZE as the pseudo max_zone_append_size.
*/
- zone_info->max_zone_append_size =
- min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
- (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+ if (bdev_is_zoned(bdev)) {
+ zone_info->max_zone_append_size = min_t(u64,
+ (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
+ (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+ } else {
+ zone_info->max_zone_append_size =
+ (u64)bdev_max_segments(bdev) << PAGE_SHIFT;
+ }
if (!IS_ALIGNED(nr_sectors, zone_sectors))
zone_info->nr_zones++;

@@ -1178,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
* offset.
*/
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
- u64 *offset_ret)
+ u64 *offset_ret, bool new)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_root *root;
@@ -1188,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
int ret;
u64 length;

+ /*
+ * Avoid tree lookups for a new block group, there's no use for it.
+ * It must always be 0.
+ *
+ * Also, we have a lock chain of extent buffer lock -> chunk mutex.
+ * For new a block group, this function is called from
+ * btrfs_make_block_group() which is already taking the chunk mutex.
+ * Thus, we cannot call calculate_alloc_pointer() which takes extent
+ * buffer locks to avoid deadlock.
+ */
+ if (new) {
+ *offset_ret = 0;
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1323,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
else
num_conventional++;

+ /*
+ * Consider a zone as active if we can allow any number of
+ * active zones.
+ */
+ if (!device->zone_info->max_active_zones)
+ __set_bit(i, active);
+
if (!is_sequential) {
alloc_offsets[i] = WP_CONVENTIONAL;
continue;
@@ -1389,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
__set_bit(i, active);
break;
}
-
- /*
- * Consider a zone as active if we can allow any number of
- * active zones.
- */
- if (!device->zone_info->max_active_zones)
- __set_bit(i, active);
}

if (num_sequential > 0)
cache->seq_zone = true;

if (num_conventional > 0) {
- /*
- * Avoid calling calculate_alloc_pointer() for new BG. It
- * is no use for new BG. It must be always 0.
- *
- * Also, we have a lock chain of extent buffer lock ->
- * chunk mutex. For new BG, this function is called from
- * btrfs_make_block_group() which is already taking the
- * chunk mutex. Thus, we cannot call
- * calculate_alloc_pointer() which takes extent buffer
- * locks to avoid deadlock.
- */
-
/* Zone capacity is always zone size in emulation */
cache->zone_capacity = cache->length;
- if (new) {
- cache->alloc_offset = 0;
- goto out;
- }
- ret = calculate_alloc_pointer(cache, &last_alloc);
- if (ret || map->num_stripes == num_conventional) {
- if (!ret)
- cache->alloc_offset = last_alloc;
- else
- btrfs_err(fs_info,
+ ret = calculate_alloc_pointer(cache, &last_alloc, new);
+ if (ret) {
+ btrfs_err(fs_info,
"zoned: failed to determine allocation offset of bg %llu",
- cache->start);
+ cache->start);
+ goto out;
+ } else if (map->num_stripes == num_conventional) {
+ cache->alloc_offset = last_alloc;
+ cache->zone_is_active = 1;
goto out;
}
}
@@ -1495,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
}

- if (cache->zone_is_active) {
- btrfs_get_block_group(cache);
- spin_lock(&fs_info->zone_active_bgs_lock);
- list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
- spin_unlock(&fs_info->zone_active_bgs_lock);
- }
-
out:
if (cache->alloc_offset > fs_info->zone_size) {
btrfs_err(fs_info,
@@ -1526,10 +1528,16 @@ out:
ret = -EIO;
}

- if (!ret)
+ if (!ret) {
cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
- if (ret) {
+ if (cache->zone_is_active) {
+ btrfs_get_block_group(cache);
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_add_tail(&cache->active_bg_list,
+ &fs_info->zone_active_bgs);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+ } else {
kfree(cache->physical_map);
cache->physical_map = NULL;
}
@@ -2007,8 +2015,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
/* For active_bg_list */
btrfs_put_block_group(block_group);

- clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
- wake_up_all(&fs_info->zone_finish_wait);
+ clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

return 0;
}
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index f5dcc4940b6da..9dfd2dd612c25 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -61,7 +61,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
- true /* is_fsctl */,
(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
CIFSMaxBufSize, NULL, NULL /* no return info */);
if (rc == -EOPNOTSUPP) {
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 3898ec2632dc4..e8a8daa82ed76 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -680,7 +680,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
struct cifs_ses *ses = tcon->ses;

rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
- FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
+ FSCTL_QUERY_NETWORK_INTERFACE_INFO,
NULL /* no data input */, 0 /* no data input */,
CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
if (rc == -EOPNOTSUPP) {
@@ -1609,9 +1609,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
struct resume_key_req *res_key;

rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
- FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
- NULL, 0 /* no input */, CIFSMaxBufSize,
- (char **)&res_key, &ret_data_len);
+ FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
+ CIFSMaxBufSize, (char **)&res_key, &ret_data_len);

if (rc == -EOPNOTSUPP) {
pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
@@ -1753,7 +1752,7 @@ smb2_ioctl_query_info(const unsigned int xid,
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
- qi.info_type, true, buffer, qi.output_buffer_length,
+ qi.info_type, buffer, qi.output_buffer_length,
CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
free_req1_func = SMB2_ioctl_free;
@@ -1929,9 +1928,8 @@ smb2_copychunk_range(const unsigned int xid,
retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
- true /* is_fsctl */, (char *)pcchunk,
- sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
- (char **)&retbuf, &ret_data_len);
+ (char *)pcchunk, sizeof(struct copychunk_ioctl),
+ CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
if (rc == 0) {
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
@@ -2091,7 +2089,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,

rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
- true /* is_fctl */,
&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
if (rc) {
tcon->broken_sparse_sup = true;
@@ -2174,7 +2171,6 @@ smb2_duplicate_extents(const unsigned int xid,
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
- true /* is_fsctl */,
(char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
CIFSMaxBufSize, NULL,
@@ -2209,7 +2205,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SET_INTEGRITY_INFORMATION,
- true /* is_fsctl */,
(char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
CIFSMaxBufSize, NULL,
@@ -2262,7 +2257,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS,
- true /* is_fsctl */,
NULL, 0 /* no input data */, max_response_size,
(char **)&retbuf,
&ret_data_len);
@@ -2982,7 +2976,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
do {
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_DFS_GET_REFERRALS,
- true /* is_fsctl */,
(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
(char **)&dfs_rsp, &dfs_rsp_size);
if (!is_retryable_error(rc))
@@ -3189,8 +3182,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,

rc = SMB2_ioctl_init(tcon, server,
&rqst[1], fid.persistent_fid,
- fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
- true /* is_fctl */, NULL, 0,
+ fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
@@ -3370,8 +3362,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,

rc = SMB2_ioctl_init(tcon, server,
&rqst[1], COMPOUND_FID,
- COMPOUND_FID, FSCTL_GET_REPARSE_POINT,
- true /* is_fctl */, NULL, 0,
+ COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
@@ -3599,26 +3590,43 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb,
return pntsd;
}

+static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
+ loff_t offset, loff_t len, unsigned int xid)
+{
+ struct cifsFileInfo *cfile = file->private_data;
+ struct file_zero_data_information fsctl_buf;
+
+ cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
+
+ fsctl_buf.FileOffset = cpu_to_le64(offset);
+ fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+
+ return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
+ (char *)&fsctl_buf,
+ sizeof(struct file_zero_data_information),
+ 0, NULL, NULL);
+}
+
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len, bool keep_size)
{
struct cifs_ses *ses = tcon->ses;
- struct inode *inode;
- struct cifsInodeInfo *cifsi;
+ struct inode *inode = file_inode(file);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsFileInfo *cfile = file->private_data;
- struct file_zero_data_information fsctl_buf;
long rc;
unsigned int xid;
__le64 eof;

xid = get_xid();

- inode = d_inode(cfile->dentry);
- cifsi = CIFS_I(inode);
-
trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
ses->Suid, offset, len);

+ inode_lock(inode);
+ filemap_invalidate_lock(inode->i_mapping);
+
/*
* We zero the range through ioctl, so we need remove the page caches
* first, otherwise the data may be inconsistent with the server.
@@ -3626,26 +3634,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
truncate_pagecache_range(inode, offset, offset + len - 1);

/* if file not oplocked can't be sure whether asking to extend size */
- if (!CIFS_CACHE_READ(cifsi))
- if (keep_size == false) {
- rc = -EOPNOTSUPP;
- trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
- tcon->tid, ses->Suid, offset, len, rc);
- free_xid(xid);
- return rc;
- }
-
- cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
-
- fsctl_buf.FileOffset = cpu_to_le64(offset);
- fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
+ rc = -EOPNOTSUPP;
+ if (keep_size == false && !CIFS_CACHE_READ(cifsi))
+ goto zero_range_exit;

- rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
- (char *)&fsctl_buf,
- sizeof(struct file_zero_data_information),
- 0, NULL, NULL);
- if (rc)
+ rc = smb3_zero_data(file, tcon, offset, len, xid);
+ if (rc < 0)
goto zero_range_exit;

/*
@@ -3658,6 +3652,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
}

zero_range_exit:
+ filemap_invalidate_unlock(inode->i_mapping);
+ inode_unlock(inode);
free_xid(xid);
if (rc)
trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
@@ -3702,7 +3698,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,

rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, (char *)&fsctl_buf,
+ (char *)&fsctl_buf,
sizeof(struct file_zero_data_information),
CIFSMaxBufSize, NULL, NULL);
filemap_invalidate_unlock(inode->i_mapping);
@@ -3764,7 +3760,7 @@ static int smb3_simple_fallocate_range(unsigned int xid,
in_data.length = cpu_to_le64(len);
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
- FSCTL_QUERY_ALLOCATED_RANGES, true,
+ FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
@@ -4085,7 +4081,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs

rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
- FSCTL_QUERY_ALLOCATED_RANGES, true,
+ FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data),
sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
@@ -4145,7 +4141,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,

rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
- FSCTL_QUERY_ALLOCATED_RANGES, true,
+ FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index ba58d7fd54f9e..31d37afae741f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1174,7 +1174,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}

rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
- FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+ FSCTL_VALIDATE_NEGOTIATE_INFO,
(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
(char **)&pneg_rsp, &rsplen);
if (rc == -EOPNOTSUPP) {
@@ -3053,7 +3053,7 @@ int
SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen,
+ char *in_data, u32 indatalen,
__u32 max_response_size)
{
struct smb2_ioctl_req *req;
@@ -3128,10 +3128,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
req->hdr.CreditCharge =
cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
SMB2_MAX_BUFFER_SIZE));
- if (is_fsctl)
- req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
- else
- req->Flags = 0;
+ /* always an FSCTL (for now) */
+ req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);

/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
@@ -3158,9 +3156,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
*/
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 opcode, bool is_fsctl,
- char *in_data, u32 indatalen, u32 max_out_data_len,
- char **out_data, u32 *plen /* returned data len */)
+ u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
+ u32 max_out_data_len, char **out_data,
+ u32 *plen /* returned data len */)
{
struct smb_rqst rqst;
struct smb2_ioctl_rsp *rsp = NULL;
@@ -3202,7 +3200,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,

rc = SMB2_ioctl_init(tcon, server,
&rqst, persistent_fid, volatile_fid, opcode,
- is_fsctl, in_data, indatalen, max_out_data_len);
+ in_data, indatalen, max_out_data_len);
if (rc)
goto ioctl_exit;

@@ -3294,7 +3292,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);

rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
- FSCTL_SET_COMPRESSION, true /* is_fsctl */,
+ FSCTL_SET_COMPRESSION,
(char *)&fsctl_input /* data input */,
2 /* in data len */, CIFSMaxBufSize /* max out data */,
&ret_data /* out data */, NULL);
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index a69f1eed1cfe5..d57d7202dc367 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -147,13 +147,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon,
extern void SMB2_open_free(struct smb_rqst *rqst);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
+ char *in_data, u32 indatalen, u32 maxoutlen,
char **out_data, u32 *plen /* returned data len */);
extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
struct TCP_Server_Info *server,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, char *in_data, u32 indatalen,
+ char *in_data, u32 indatalen,
__u32 max_response_size);
extern void SMB2_ioctl_free(struct smb_rqst *rqst);
extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 3dcf0b8b4e932..232cfdf095aeb 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -744,6 +744,28 @@ void debugfs_remove(struct dentry *dentry)
}
EXPORT_SYMBOL_GPL(debugfs_remove);

+/**
+ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
+ * @name: a pointer to a string containing the name of the item to look up.
+ * @parent: a pointer to the parent dentry of the item.
+ *
+ * This is the equlivant of doing something like
+ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
+ * handled for the directory being looked up.
+ */
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+
+ dentry = debugfs_lookup(name, parent);
+ if (!dentry)
+ return;
+
+ debugfs_remove(dentry);
+ dput(dentry);
+}
+EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
+
/**
* debugfs_rename - rename a file/directory in the debugfs filesystem
* @old_dir: a pointer to the parent dentry for the renamed object. This
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 8e01d89c3319e..b5fd9d71e67f1 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -222,8 +222,10 @@ static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)

rreq = erofs_fscache_alloc_request(folio_mapping(folio),
folio_pos(folio), folio_size(folio));
- if (IS_ERR(rreq))
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
goto out;
+ }

return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
rreq, mdev.m_pa);
@@ -301,8 +303,10 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio)

rreq = erofs_fscache_alloc_request(folio_mapping(folio),
folio_pos(folio), folio_size(folio));
- if (IS_ERR(rreq))
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
goto out_unlock;
+ }

pstart = mdev.m_pa + (pos - map.m_la);
return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index cfee49d33b95a..a01cc82795a25 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -195,7 +195,6 @@ struct erofs_workgroup {
atomic_t refcount;
};

-#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
int val)
{
@@ -224,34 +223,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
return atomic_cond_read_relaxed(&grp->refcount,
VAL != EROFS_LOCKED_MAGIC);
}
-#else
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- /* no need to spin on UP platforms, let's just disable preemption. */
- if (val != atomic_read(&grp->refcount)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- int v = atomic_read(&grp->refcount);
-
- /* workgroup is never freezed on uniprocessor systems */
- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
- return v;
-}
-#endif /* !CONFIG_SMP */
#endif /* !CONFIG_EROFS_FS_ZIP */

/* we strictly follow PAGE_SIZE and no buffer head yet */
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 81d26abf486fa..da85b39791957 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -141,6 +141,8 @@ struct tracefs_mount_opts {
kuid_t uid;
kgid_t gid;
umode_t mode;
+ /* Opt_* bitfield. */
+ unsigned int opts;
};

enum {
@@ -241,6 +243,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
kgid_t gid;
char *p;

+ opts->opts = 0;
opts->mode = TRACEFS_DEFAULT_MODE;

while ((p = strsep(&data, ",")) != NULL) {
@@ -275,24 +278,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
* but traditionally tracefs has ignored all mount options
*/
}
+
+ opts->opts |= BIT(token);
}

return 0;
}

-static int tracefs_apply_options(struct super_block *sb)
+static int tracefs_apply_options(struct super_block *sb, bool remount)
{
struct tracefs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
struct tracefs_mount_opts *opts = &fsi->mount_opts;

- inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= opts->mode;
+ /*
+ * On remount, only reset mode/uid/gid if they were provided as mount
+ * options.
+ */
+
+ if (!remount || opts->opts & BIT(Opt_mode)) {
+ inode->i_mode &= ~S_IALLUGO;
+ inode->i_mode |= opts->mode;
+ }

- inode->i_uid = opts->uid;
+ if (!remount || opts->opts & BIT(Opt_uid))
+ inode->i_uid = opts->uid;

- /* Set all the group ids to the mount option */
- set_gid(sb->s_root, opts->gid);
+ if (!remount || opts->opts & BIT(Opt_gid)) {
+ /* Set all the group ids to the mount option */
+ set_gid(sb->s_root, opts->gid);
+ }

return 0;
}
@@ -307,7 +322,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto fail;

- tracefs_apply_options(sb);
+ tracefs_apply_options(sb, true);

fail:
return err;
@@ -359,7 +374,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)

sb->s_op = &tracefs_super_operations;

- tracefs_apply_options(sb);
+ tracefs_apply_options(sb, false);

return 0;

diff --git a/include/kunit/test.h b/include/kunit/test.h
index 8ffcd7de96070..648dbb00a3008 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -863,7 +863,7 @@ do { \

#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
KUNIT_BINARY_INT_ASSERTION(test, \
- KUNIT_ASSERTION, \
+ KUNIT_EXPECTATION, \
left, <=, right, \
fmt, \
##__VA_ARGS__)
@@ -1153,7 +1153,7 @@ do { \

#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
KUNIT_BINARY_INT_ASSERTION(test, \
- KUNIT_EXPECTATION, \
+ KUNIT_ASSERTION, \
left, <, right, \
fmt, \
##__VA_ARGS__)
@@ -1194,7 +1194,7 @@ do { \

#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
KUNIT_BINARY_INT_ASSERTION(test, \
- KUNIT_EXPECTATION, \
+ KUNIT_ASSERTION, \
left, >, right, \
fmt, \
##__VA_ARGS__)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index badcc0e3418f2..262664107b839 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)

static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
/*
* make it consistent with folio_mark_uptodate
* pairs with smp_load_acquire in buffer_uptodate
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c869f1e73d755..f60674692d365 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
#define debugfs_remove_recursive debugfs_remove

+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
const struct file_operations *debugfs_real_fops(const struct file *filp);

int debugfs_file_get(struct dentry *dentry);
@@ -225,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }

+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
const struct file_operations *debugfs_real_fops(const struct file *filp);

static inline int debugfs_file_get(struct dentry *dentry)
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index cbd714a198a0a..f3a3d95df5325 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -69,6 +69,7 @@ struct dmar_pci_notify_info {

extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
+extern int intel_iommu_enabled;

#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -92,7 +93,8 @@ extern struct list_head dmar_drhd_units;
static inline bool dmar_rcu_check(void)
{
return rwsem_is_locked(&dmar_global_lock) ||
- system_state == SYSTEM_BOOTING;
+ system_state == SYSTEM_BOOTING ||
+ (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
}

#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index eafa1d2489fda..4e94755098f19 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -406,4 +406,5 @@ LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#ifdef CONFIG_IO_URING
LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 91c8146649f59..b681cfce6190a 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1575,6 +1575,9 @@
* Check whether the current task is allowed to spawn a io_uring polling
* thread (IORING_SETUP_SQPOLL).
*
+ * @uring_cmd:
+ * Check whether the file_operations uring_cmd is allowed to run.
+ *
*/
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
diff --git a/include/linux/security.h b/include/linux/security.h
index 7fc4e9f49f542..3cc127bb5bfd4 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2051,6 +2051,7 @@ static inline int security_perf_event_write(struct perf_event *event)
#ifdef CONFIG_SECURITY
extern int security_uring_override_creds(const struct cred *new);
extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
#else
static inline int security_uring_override_creds(const struct cred *new)
{
@@ -2060,6 +2061,10 @@ static inline int security_uring_sqpoll(void)
{
return 0;
}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2f41364a6791e..63d0a21b63162 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2528,6 +2528,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}

+static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
+ int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ /*
+ * Propagate page pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page but rely
+ * on page_is_pfmemalloc doing the right thing(tm).
+ */
+ frag->bv_page = page;
+ frag->bv_offset = off;
+ skb_frag_size_set(frag, size);
+}
+
/**
* __skb_fill_page_desc - initialise a paged fragment in an skb
* @skb: buffer containing fragment to be initialised
@@ -2544,17 +2560,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- /*
- * Propagate page pfmemalloc to the skb if we can. The problem is
- * that not all callers have unique ownership of the page but rely
- * on page_is_pfmemalloc doing the right thing(tm).
- */
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
-
+ __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
page = compound_head(page);
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -2581,6 +2587,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}

+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data with @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc, if page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+ struct page *page, int off,
+ int size)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+ shinfo->nr_frags = i + 1;
+}
+
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize);

diff --git a/include/linux/time64.h b/include/linux/time64.h
index 81b9686a20799..2fb8232cff1d5 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -20,6 +20,9 @@ struct itimerspec64 {
struct timespec64 it_value;
};

+/* Parameters used to convert the timespec values: */
+#define PSEC_PER_NSEC 1000L
+
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define TIME64_MIN (-TIME64_MAX - 1)
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 254a2654400f8..e96da4157d04d 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -70,6 +70,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);

diff --git a/include/net/bonding.h b/include/net/bonding.h
index cb904d356e31e..3b816ae8b1f3b 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -161,8 +161,9 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- /* all three in jiffies */
+ /* all 4 in jiffies */
unsigned long last_link_up;
+ unsigned long last_tx;
unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
@@ -539,6 +540,16 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
return slave->last_rx;
}

+static inline void slave_update_last_tx(struct slave *slave)
+{
+ WRITE_ONCE(slave->last_tx, jiffies);
+}
+
+static inline unsigned long slave_last_tx(struct slave *slave)
+{
+ return READ_ONCE(slave->last_tx);
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave,
struct sk_buff *skb)
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index afc7ce713657b..72394f441dad8 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int udp_offset);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_err_lookup_t encap_err_lookup;
+ udp_tunnel_encap_err_rcv_t encap_err_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
index 9e17247474fa9..6ce3bd22f6c69 100644
--- a/include/soc/at91/sama7-ddr.h
+++ b/include/soc/at91/sama7-ddr.h
@@ -38,6 +38,14 @@
#define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */

#define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
+#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */
+#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */
+
+#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */

/* UDDRC */
#define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cd155b7e1346d..48833d0edd089 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -4878,6 +4878,10 @@ static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
if (!req->file->f_op->uring_cmd)
return -EOPNOTSUPP;

+ ret = security_uring_cmd(ioucmd);
+ if (ret)
+ return ret;
+
if (ctx->flags & IORING_SETUP_SQE128)
issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32)
@@ -8260,6 +8264,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)

switch (io_arm_poll_handler(req, 0)) {
case IO_APOLL_READY:
+ io_kbuf_recycle(req, 0);
io_req_task_queue(req);
break;
case IO_APOLL_ABORTED:
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index ce95aee05e8ae..e702ca368539a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2346,6 +2346,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
}
EXPORT_SYMBOL_GPL(task_cgroup_path);

+/**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(bool lock_threadgroup)
+{
+ cpus_read_lock();
+ if (lock_threadgroup)
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(bool lock_threadgroup)
+{
+ if (lock_threadgroup)
+ percpu_up_write(&cgroup_threadgroup_rwsem);
+ cpus_read_unlock();
+}
+
/**
* cgroup_migrate_add_task - add a migration target task to a migration context
* @task: target task
@@ -2822,8 +2863,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
}

struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
- bool *locked)
- __acquires(&cgroup_threadgroup_rwsem)
+ bool *threadgroup_locked)
{
struct task_struct *tsk;
pid_t pid;
@@ -2840,12 +2880,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
* Therefore, we can skip the global lock.
*/
lockdep_assert_held(&cgroup_mutex);
- if (pid || threadgroup) {
- percpu_down_write(&cgroup_threadgroup_rwsem);
- *locked = true;
- } else {
- *locked = false;
- }
+ *threadgroup_locked = pid || threadgroup;
+ cgroup_attach_lock(*threadgroup_locked);

rcu_read_lock();
if (pid) {
@@ -2876,17 +2912,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
goto out_unlock_rcu;

out_unlock_threadgroup:
- if (*locked) {
- percpu_up_write(&cgroup_threadgroup_rwsem);
- *locked = false;
- }
+ cgroup_attach_unlock(*threadgroup_locked);
+ *threadgroup_locked = false;
out_unlock_rcu:
rcu_read_unlock();
return tsk;
}

-void cgroup_procs_write_finish(struct task_struct *task, bool locked)
- __releases(&cgroup_threadgroup_rwsem)
+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked)
{
struct cgroup_subsys *ss;
int ssid;
@@ -2894,8 +2927,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked)
/* release reference from cgroup_procs_write_start() */
put_task_struct(task);

- if (locked)
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(threadgroup_locked);
+
for_each_subsys(ss, ssid)
if (ss->post_attach)
ss->post_attach();
@@ -2950,12 +2983,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
struct cgroup_subsys_state *d_css;
struct cgroup *dsct;
struct css_set *src_cset;
+ bool has_tasks;
int ret;

lockdep_assert_held(&cgroup_mutex);

- percpu_down_write(&cgroup_threadgroup_rwsem);
-
/* look up all csses currently attached to @cgrp's subtree */
spin_lock_irq(&css_set_lock);
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
@@ -2966,6 +2998,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
}
spin_unlock_irq(&css_set_lock);

+ /*
+ * We need to write-lock threadgroup_rwsem while migrating tasks.
+ * However, if there are no source csets for @cgrp, changing its
+ * controllers isn't gonna produce any task migrations and the
+ * write-locking can be skipped safely.
+ */
+ has_tasks = !list_empty(&mgctx.preloaded_src_csets);
+ cgroup_attach_lock(has_tasks);
+
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(&mgctx);
if (ret)
@@ -2985,7 +3026,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
ret = cgroup_migrate_execute(&mgctx);
out_finish:
cgroup_migrate_finish(&mgctx);
- percpu_up_write(&cgroup_threadgroup_rwsem);
+ cgroup_attach_unlock(has_tasks);
return ret;
}

@@ -4933,13 +4974,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
struct task_struct *task;
const struct cred *saved_cred;
ssize_t ret;
- bool locked;
+ bool threadgroup_locked;

dst_cgrp = cgroup_kn_lock_live(of->kn, false);
if (!dst_cgrp)
return -ENODEV;

- task = cgroup_procs_write_start(buf, threadgroup, &locked);
+ task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
ret = PTR_ERR_OR_ZERO(task);
if (ret)
goto out_unlock;
@@ -4965,7 +5006,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
ret = cgroup_attach_task(dst_cgrp, task, threadgroup);

out_finish:
- cgroup_procs_write_finish(task, locked);
+ cgroup_procs_write_finish(task, threadgroup_locked);
out_unlock:
cgroup_kn_unlock(of->kn);

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 58aadfda9b8b3..1f3a55297f39d 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2289,7 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);

- cpus_read_lock();
+ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
percpu_down_write(&cpuset_rwsem);

guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2343,7 +2343,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);

percpu_up_write(&cpuset_rwsem);
- cpus_read_unlock();
}

/* The various types of files and directories in a cpuset file system */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 5830dce6081b3..ce34d50f7a9bb 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -464,7 +464,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
}
}

-#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+ return start + (idx << IO_TLB_SHIFT);
+}

/*
* Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
diff --git a/kernel/fork.c b/kernel/fork.c
index 9d44f2d46c696..d587c85f35b1e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1225,6 +1225,7 @@ void mmput_async(struct mm_struct *mm)
schedule_work(&mm->async_put_work);
}
}
+EXPORT_SYMBOL_GPL(mmput_async);
#endif

/**
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 08350e35aba24..ca9d834d0b843 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1562,6 +1562,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
/* Ensure it is not in reserved area nor out of text */
if (!(core_kernel_text((unsigned long) p->addr) ||
is_module_text_address((unsigned long) p->addr)) ||
+ in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index bb3d63bdf4ae8..667876da8382d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -416,7 +416,7 @@ void update_sched_domain_debugfs(void)
char buf[32];

snprintf(buf, sizeof(buf), "cpu%d", cpu);
- debugfs_remove(debugfs_lookup(buf, sd_dentry));
+ debugfs_lookup_and_remove(buf, sd_dentry);
d_cpu = debugfs_create_dir(buf, sd_dentry);

i = 0;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index cb866c3141af2..918730d749325 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -142,7 +142,8 @@ static bool check_user_trigger(struct trace_event_file *file)
{
struct event_trigger_data *data;

- list_for_each_entry_rcu(data, &file->triggers, list) {
+ list_for_each_entry_rcu(data, &file->triggers, list,
+ lockdep_is_held(&event_mutex)) {
if (data->flags & EVENT_TRIGGER_FL_PROBE)
continue;
return true;
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 95b58bd757ce4..1e130da1b742c 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -95,14 +95,14 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
}

lockdep_hardirqs_on_prepare();
- lockdep_hardirqs_on(CALLER_ADDR0);
+ lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
- lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirqs_off(caller_addr);

if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a182f5ddaf68b..acd7cbb82e160 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
gfp_t gfp)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
*/
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
*/
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
*/
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index ff47790366497..f20f4373ff408 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (rt->dst.dev == dev) {
+ skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
goto bridged_dnat;
}
@@ -413,6 +414,7 @@ bridged_dnat:
kfree_skb(skb);
return 0;
}
+ skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}

diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index e4e0c836c3f51..6b07f30675bb0 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
kfree_skb(skb);
return 0;
}
+ skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}

diff --git a/net/core/datagram.c b/net/core/datagram.c
index 50f4faeea76cc..48e82438acb02 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -675,7 +675,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
page_ref_sub(last_head, refs);
refs = 0;
}
- skb_fill_page_desc(skb, frag++, head, start, size);
+ skb_fill_page_desc_noacc(skb, frag++, head, start, size);
}
if (refs)
page_ref_sub(last_head, refs);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bebf58464d667..4b2b07a9422cf 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4179,9 +4179,8 @@ normal:
SKB_GSO_CB(nskb)->csum_start =
skb_headroom(nskb) + doffset;
} else {
- skb_copy_bits(head_skb, offset,
- skb_put(nskb, len),
- len);
+ if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
+ goto err;
}
continue;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3d446773ff2a5..ab03977b65781 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1015,7 +1015,7 @@ new_segment:
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
get_page(page);
- skb_fill_page_desc(skb, i, page, offset, copy);
+ skb_fill_page_desc_noacc(skb, i, page, offset, copy);
}

if (!(flags & MSG_NO_SHARED_FRAGS))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e5435156e545d..c30696eafc361 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2514,6 +2514,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}

+static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+ /* Hold old state until something *above* high_seq
+ * is ACKed. For Reno it is MUST to prevent false
+ * fast retransmits (RFC2582). SACK TCP is safe. */
+ if (!tcp_any_retrans_done(sk))
+ tp->retrans_stamp = 0;
+ return true;
+ }
+ return false;
+}
+
/* People celebrate: "We love our President!" */
static bool tcp_try_undo_recovery(struct sock *sk)
{
@@ -2536,14 +2551,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
} else if (tp->rack.reo_wnd_persist) {
tp->rack.reo_wnd_persist--;
}
- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
- /* Hold old state until something *above* high_seq
- * is ACKed. For Reno it is MUST to prevent false
- * fast retransmits (RFC2582). SACK TCP is safe. */
- if (!tcp_any_retrans_done(sk))
- tp->retrans_stamp = 0;
+ if (tcp_is_non_sack_preventing_reopen(sk))
return true;
- }
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
return false;
@@ -2579,6 +2588,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPSPURIOUSRTOS);
inet_csk(sk)->icsk_retransmits = 0;
+ if (tcp_is_non_sack_preventing_reopen(sk))
+ return true;
if (frto_undo || tcp_is_sack(tp)) {
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index aa9f2ec3dc468..01e1d36bdf135 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -781,6 +781,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
*/
if (tunnel) {
/* ...not for tunnels though: we don't have a sending socket */
+ if (udp_sk(sk)->encap_err_rcv)
+ udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2);
goto out;
}
if (!inet->recverr) {
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 8efaf8c3fe2a9..8242c8947340e 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -72,6 +72,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,

udp_sk(sk)->encap_type = cfg->encap_type;
udp_sk(sk)->encap_rcv = cfg->encap_rcv;
+ udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
udp_sk(sk)->encap_destroy = cfg->encap_destroy;
udp_sk(sk)->gro_receive = cfg->gro_receive;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b738eb7e1cae8..04cf06866e765 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3557,11 +3557,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
fallthrough;
case NETDEV_UP:
case NETDEV_CHANGE:
- if (dev->flags & IFF_SLAVE)
+ if (idev && idev->cnf.disable_ipv6)
break;

- if (idev && idev->cnf.disable_ipv6)
+ if (dev->flags & IFF_SLAVE) {
+ if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
+ dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
+ ipv6_mc_up(idev);
break;
+ }

if (event == NETDEV_UP) {
/* restore routes for permanent addresses */
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 73aaabf0e9665..0b0e34ddc64e0 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}

+ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
if (hinfo) {
err = seg6_hmac_info_del(net, hmackeyid);
if (err)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e2f2e087a7531..40074bc7274ea 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -616,8 +616,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}

/* Tunnels don't have an application socket: don't pass errors back */
- if (tunnel)
+ if (tunnel) {
+ if (udp_sk(sk)->encap_err_rcv)
+ udp_sk(sk)->encap_err_rcv(sk, skb, offset);
goto out;
+ }

if (!np->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 1796c456ac98b..992decbcaa5c1 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -194,8 +194,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,

/* dcc_ip can be the internal OR external (NAT'ed) IP */
tuple = &ct->tuplehash[dir].tuple;
- if (tuple->src.u3.ip != dcc_ip &&
- tuple->dst.u3.ip != dcc_ip) {
+ if ((tuple->src.u3.ip != dcc_ip &&
+ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
+ dcc_port == 0) {
net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
&tuple->src.u3.ip,
&dcc_ip, dcc_port);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index a63b51dceaf2c..a634c72b1ffcf 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -655,6 +655,37 @@ static bool tcp_in_window(struct nf_conn *ct,
tn->tcp_be_liberal)
res = true;
if (!res) {
+ bool seq_ok = before(seq, sender->td_maxend + 1);
+
+ if (!seq_ok) {
+ u32 overshot = end - sender->td_maxend + 1;
+ bool ack_ok;
+
+ ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
+
+ if (in_recv_win &&
+ ack_ok &&
+ overshot <= receiver->td_maxwin &&
+ before(sack, receiver->td_end + 1)) {
+ /* Work around TCPs that send more bytes than allowed by
+ * the receive window.
+ *
+ * If the (marked as invalid) packet is allowed to pass by
+ * the ruleset and the peer acks this data, then its possible
+ * all future packets will trigger 'ACK is over upper bound' check.
+ *
+ * Thus if only the sequence check fails then do update td_end so
+ * possible ACK for this data can update internal state.
+ */
+ sender->td_end = end;
+ sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
+
+ nf_ct_l4proto_log_invalid(skb, ct, hook_state,
+ "%u bytes more than expected", overshot);
+ return res;
+ }
+ }
+
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"%s",
before(seq, sender->td_maxend + 1) ?
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index bc690238a3c56..848cc81d69926 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2166,8 +2166,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
chain->flags |= NFT_CHAIN_BASE | flags;
basechain->policy = NF_ACCEPT;
if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
- !nft_chain_offload_support(basechain))
+ !nft_chain_offload_support(basechain)) {
+ list_splice_init(&basechain->hook_list, &hook->list);
return -EOPNOTSUPP;
+ }

flow_block_init(&basechain->flow_block);

diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 571436064cd6f..62c70709d7980 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -982,6 +982,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
/*
* peer_event.c
*/
+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
void rxrpc_error_report(struct sock *);
void rxrpc_peer_keepalive_worker(struct work_struct *);

diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 96ecb7356c0fe..79bb02eb67b2b 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)

tuncfg.encap_type = UDP_ENCAP_RXRPC;
tuncfg.encap_rcv = rxrpc_input_packet;
+ tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
tuncfg.sk_user_data = local;
setup_udp_tunnel_sock(net, local->socket, &tuncfg);

diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
|
||
|
index be032850ae8ca..32561e9567fe3 100644
|
||
|
--- a/net/rxrpc/peer_event.c
|
||
|
+++ b/net/rxrpc/peer_event.c
|
||
|
@@ -16,22 +16,105 @@
|
||
|
#include <net/sock.h>
|
||
|
#include <net/af_rxrpc.h>
|
||
|
#include <net/ip.h>
|
||
|
+#include <net/icmp.h>
|
||
|
#include "ar-internal.h"
|
||
|
|
||
|
+static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
|
||
|
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
|
||
|
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
|
||
|
enum rxrpc_call_completion);
|
||
|
|
||
|
/*
|
||
|
- * Find the peer associated with an ICMP packet.
|
||
|
+ * Find the peer associated with an ICMPv4 packet.
|
||
|
*/
|
||
|
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
|
||
|
- const struct sk_buff *skb,
|
||
|
+ struct sk_buff *skb,
|
||
|
+ unsigned int udp_offset,
|
||
|
+ unsigned int *info,
|
||
|
struct sockaddr_rxrpc *srx)
|
||
|
{
|
||
|
- struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
|
||
|
+ struct iphdr *ip, *ip0 = ip_hdr(skb);
|
||
|
+ struct icmphdr *icmp = icmp_hdr(skb);
|
||
|
+ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
|
||
|
|
||
|
- _enter("");
|
||
|
+ _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
|
||
|
+
|
||
|
+ switch (icmp->type) {
|
||
|
+ case ICMP_DEST_UNREACH:
|
||
|
+ *info = ntohs(icmp->un.frag.mtu);
|
||
|
+ fallthrough;
|
||
|
+ case ICMP_TIME_EXCEEDED:
|
||
|
+ case ICMP_PARAMETERPROB:
|
||
|
+ ip = (struct iphdr *)((void *)icmp + 8);
|
||
|
+ break;
|
||
|
+ default:
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+
|
||
|
+ memset(srx, 0, sizeof(*srx));
|
||
|
+ srx->transport_type = local->srx.transport_type;
|
||
|
+ srx->transport_len = local->srx.transport_len;
|
||
|
+ srx->transport.family = local->srx.transport.family;
|
||
|
+
|
||
|
+ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
|
||
|
+ * versa?
|
||
|
+ */
|
||
|
+ switch (srx->transport.family) {
|
||
|
+ case AF_INET:
|
||
|
+ srx->transport_len = sizeof(srx->transport.sin);
|
||
|
+ srx->transport.family = AF_INET;
|
||
|
+ srx->transport.sin.sin_port = udp->dest;
|
||
|
+ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
|
||
|
+ sizeof(struct in_addr));
|
||
|
+ break;
|
||
|
+
|
||
|
+#ifdef CONFIG_AF_RXRPC_IPV6
|
||
|
+ case AF_INET6:
|
||
|
+ srx->transport_len = sizeof(srx->transport.sin);
|
||
|
+ srx->transport.family = AF_INET;
|
||
|
+ srx->transport.sin.sin_port = udp->dest;
|
||
|
+ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
|
||
|
+ sizeof(struct in_addr));
|
||
|
+ break;
|
||
|
+#endif
|
||
|
+
|
||
|
+ default:
|
||
|
+ WARN_ON_ONCE(1);
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
+
|
||
|
+ _net("ICMP {%pISp}", &srx->transport);
|
||
|
+ return rxrpc_lookup_peer_rcu(local, srx);
|
||
|
+}
|
||
|
+
|
||
|
+#ifdef CONFIG_AF_RXRPC_IPV6
|
||
|
+/*
|
||
|
+ * Find the peer associated with an ICMPv6 packet.
|
||
|
+ */
|
||
|
+static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
|
||
|
+ struct sk_buff *skb,
|
||
|
+ unsigned int udp_offset,
|
||
|
+ unsigned int *info,
|
||
|
+ struct sockaddr_rxrpc *srx)
|
||
|
+{
|
||
|
+ struct icmp6hdr *icmp = icmp6_hdr(skb);
|
||
|
+ struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
|
||
|
+ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
|
||
|
+
|
||
|
+ _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
|
||
|
+
|
||
|
+ switch (icmp->icmp6_type) {
|
||
|
+ case ICMPV6_DEST_UNREACH:
|
||
|
+ *info = ntohl(icmp->icmp6_mtu);
|
||
|
+ fallthrough;
|
||
|
+ case ICMPV6_PKT_TOOBIG:
|
||
|
+ case ICMPV6_TIME_EXCEED:
|
||
|
+ case ICMPV6_PARAMPROB:
|
||
|
+ ip = (struct ipv6hdr *)((void *)icmp + 8);
|
||
|
+ break;
|
||
|
+ default:
|
||
|
+ return NULL;
|
||
|
+ }
|
||
|
|
||
|
memset(srx, 0, sizeof(*srx));
|
||
|
srx->transport_type = local->srx.transport_type;
|
||
|
@@ -41,6 +124,165 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
|
||
|
/* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
|
||
|
* versa?
|
||
|
*/
|
||
|
+ switch (srx->transport.family) {
|
||
|
+ case AF_INET:
|
||
|
+ _net("Rx ICMP6 on v4 sock");
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.family = AF_INET;
+ srx->transport.sin.sin_port = udp->dest;
+ memcpy(&srx->transport.sin.sin_addr,
+ &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
+ break;
+ case AF_INET6:
+ _net("Rx ICMP6");
+ srx->transport.sin.sin_port = udp->dest;
+ memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
+ sizeof(struct in6_addr));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ _net("ICMP {%pISp}", &srx->transport);
+ return rxrpc_lookup_peer_rcu(local, srx);
+}
+#endif /* CONFIG_AF_RXRPC_IPV6 */
+
+/*
+ * Handle an error received on the local endpoint as a tunnel.
+ */
+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
+ unsigned int udp_offset)
+{
+ struct sock_extended_err ee;
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_local *local;
+ struct rxrpc_peer *peer;
+ unsigned int info = 0;
+ int err;
+ u8 version = ip_hdr(skb)->version;
+ u8 type = icmp_hdr(skb)->type;
+ u8 code = icmp_hdr(skb)->code;
+
+ rcu_read_lock();
+ local = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!local)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ rxrpc_new_skb(skb, rxrpc_skb_received);
+
+ switch (ip_hdr(skb)->version) {
+ case IPVERSION:
+ peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
+ &info, &srx);
+ break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case 6:
+ peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
+ &info, &srx);
+ break;
+#endif
+ default:
+ rcu_read_unlock();
+ return;
+ }
+
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ rcu_read_unlock();
+ return;
+ }
+
+ memset(&ee, 0, sizeof(ee));
+
+ switch (version) {
+ case IPVERSION:
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_FRAG_NEEDED:
+ rxrpc_adjust_mtu(peer, info);
+ rcu_read_unlock();
+ rxrpc_put_peer(peer);
+ return;
+ default:
+ break;
+ }
+
+ err = EHOSTUNREACH;
+ if (code <= NR_ICMP_UNREACH) {
+ /* Might want to do something different with
+ * non-fatal errors
+ */
+ //harderr = icmp_err_convert[code].fatal;
+ err = icmp_err_convert[code].errno;
+ }
+ break;
+
+ case ICMP_TIME_EXCEEDED:
+ err = EHOSTUNREACH;
+ break;
+ default:
+ err = EPROTO;
+ break;
+ }
+
+ ee.ee_origin = SO_EE_ORIGIN_ICMP;
+ ee.ee_type = type;
+ ee.ee_code = code;
+ ee.ee_errno = err;
+ break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case 6:
+ switch (type) {
+ case ICMPV6_PKT_TOOBIG:
+ rxrpc_adjust_mtu(peer, info);
+ rcu_read_unlock();
+ rxrpc_put_peer(peer);
+ return;
+ }
+
+ icmpv6_err_convert(type, code, &err);
+
+ if (err == EACCES)
+ err = EHOSTUNREACH;
+
+ ee.ee_origin = SO_EE_ORIGIN_ICMP6;
+ ee.ee_type = type;
+ ee.ee_code = code;
+ ee.ee_errno = err;
+ break;
+#endif
+ }
+
+ trace_rxrpc_rx_icmp(peer, &ee, &srx);
+
+ rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
+ rcu_read_unlock();
+ rxrpc_put_peer(peer);
+}
+
+/*
+ * Find the peer associated with a local error.
+ */
+static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
+ const struct sk_buff *skb,
+ struct sockaddr_rxrpc *srx)
+{
+ struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
+
+ _enter("");
+
+ memset(srx, 0, sizeof(*srx));
+ srx->transport_type = local->srx.transport_type;
+ srx->transport_len = local->srx.transport_len;
+ srx->transport.family = local->srx.transport.family;
+
 switch (srx->transport.family) {
 case AF_INET:
 srx->transport_len = sizeof(srx->transport.sin);
@@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
 /*
 * Handle an MTU/fragmentation problem.
 */
-static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
+static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
 {
- u32 mtu = serr->ee.ee_info;
-
 _net("Rx ICMP Fragmentation Needed (%d)", mtu);

 /* wind down the local interface MTU */
@@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk)
 struct sock_exterr_skb *serr;
 struct sockaddr_rxrpc srx;
 struct rxrpc_local *local;
- struct rxrpc_peer *peer;
+ struct rxrpc_peer *peer = NULL;
 struct sk_buff *skb;

 rcu_read_lock();
@@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk)
 }
 rxrpc_new_skb(skb, rxrpc_skb_received);
 serr = SKB_EXT_ERR(skb);
- if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
- _leave("UDP empty message");
- rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- return;
- }

- peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (!peer) {
- rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- _leave(" [no peer]");
- return;
- }
-
- trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
-
- if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
- serr->ee.ee_type == ICMP_DEST_UNREACH &&
- serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
- rxrpc_adjust_mtu(peer, serr);
- rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- rxrpc_put_peer(peer);
- _leave(" [MTU update]");
- return;
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
+ peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (peer) {
+ trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+ rxrpc_store_error(peer, serr);
+ }
 }

- rxrpc_store_error(peer, serr);
 rcu_read_unlock();
 rxrpc_free_skb(skb, rxrpc_skb_freed);
 rxrpc_put_peer(peer);
-
 _leave("");
 }

diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 08aab5c01437d..db47844f4ac99 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
 * directly into the target buffer.
 */
 sg = _sg;
- nsg = skb_shinfo(skb)->nr_frags;
+ nsg = skb_shinfo(skb)->nr_frags + 1;
 if (nsg <= 4) {
 nsg = 4;
 } else {
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 3d061a13d7ed2..2829455211f8c 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
 }
 }

-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
 {
 u32 sfbhash;

- sfbhash = sfb_hash(skb, 0);
+ sfbhash = cb->hashes[0];
 if (sfbhash)
 increment_one_qlen(sfbhash, 0, q);

- sfbhash = sfb_hash(skb, 1);
+ sfbhash = cb->hashes[1];
 if (sfbhash)
 increment_one_qlen(sfbhash, 1, q);
 }
@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 {

 struct sfb_sched_data *q = qdisc_priv(sch);
+ unsigned int len = qdisc_pkt_len(skb);
 struct Qdisc *child = q->qdisc;
 struct tcf_proto *fl;
+ struct sfb_skb_cb cb;
 int i;
 u32 p_min = ~0;
 u32 minqlen = ~0;
@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 }

 enqueue:
+ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
 ret = qdisc_enqueue(skb, child, to_free);
 if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
 sch->q.qlen++;
- increment_qlen(skb, q);
+ increment_qlen(&cb, q);
 } else if (net_xmit_drop_count(ret)) {
 q->stats.childdrop++;
 qdisc_qstats_drop(sch);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b9c71a304d399..0b941dd63d268 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/time.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -176,7 +177,7 @@ static ktime_t get_interval_end_time(struct sched_gate_list *sched,

 static int length_to_duration(struct taprio_sched *q, int len)
 {
- return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
+ return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
 }

 /* Returns the entry corresponding to next available interval. If
@@ -551,7 +552,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
 static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
 {
 atomic_set(&entry->budget,
- div64_u64((u64)entry->interval * 1000,
+ div64_u64((u64)entry->interval * PSEC_PER_NSEC,
 atomic64_read(&q->picos_per_byte)));
 }

diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f40f6ed0fbdb4..1f3bb1f6b1f7b 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -755,6 +755,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 lnk->lgr = lgr;
 smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
 lnk->link_idx = link_idx;
+ lnk->wr_rx_id_compl = 0;
 smc_ibdev_cnt_inc(lnk);
 smcr_copy_dev_info_to_link(lnk);
 atomic_set(&lnk->conn_cnt, 0);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 4cb03e9423648..7b43a78c7f73a 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -115,8 +115,10 @@ struct smc_link {
 dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
 dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/
 u64 wr_rx_id; /* seq # of last recv WR */
+ u64 wr_rx_id_compl; /* seq # of last completed WR */
 u32 wr_rx_cnt; /* number of WR recv buffers */
 unsigned long wr_rx_tstamp; /* jiffies when last buf rx */
+ wait_queue_head_t wr_rx_empty_wait; /* wait for RQ empty */

 struct ib_reg_wr wr_reg; /* WR register memory region */
 wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 26f8f240d9e84..b0678a417e09d 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -454,6 +454,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)

 for (i = 0; i < num; i++) {
 link = wc[i].qp->qp_context;
+ link->wr_rx_id_compl = wc[i].wr_id;
 if (wc[i].status == IB_WC_SUCCESS) {
 link->wr_rx_tstamp = jiffies;
 smc_wr_rx_demultiplex(&wc[i]);
@@ -465,6 +466,8 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
 case IB_WC_RNR_RETRY_EXC_ERR:
 case IB_WC_WR_FLUSH_ERR:
 smcr_link_down_cond_sched(link);
+ if (link->wr_rx_id_compl == link->wr_rx_id)
+ wake_up(&link->wr_rx_empty_wait);
 break;
 default:
 smc_wr_rx_post(link); /* refill WR RX */
@@ -639,6 +642,7 @@ void smc_wr_free_link(struct smc_link *lnk)
 return;
 ibdev = lnk->smcibdev->ibdev;

+ smc_wr_drain_cq(lnk);
 smc_wr_wakeup_reg_wait(lnk);
 smc_wr_wakeup_tx_wait(lnk);

@@ -889,6 +893,7 @@ int smc_wr_create_link(struct smc_link *lnk)
 atomic_set(&lnk->wr_tx_refcnt, 0);
 init_waitqueue_head(&lnk->wr_reg_wait);
 atomic_set(&lnk->wr_reg_refcnt, 0);
+ init_waitqueue_head(&lnk->wr_rx_empty_wait);
 return rc;

 dma_unmap:
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index a54e90a1110fd..45e9b894d3f8a 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -73,6 +73,11 @@ static inline void smc_wr_tx_link_put(struct smc_link *link)
 wake_up_all(&link->wr_tx_wait);
 }

+static inline void smc_wr_drain_cq(struct smc_link *lnk)
+{
+ wait_event(lnk->wr_rx_empty_wait, lnk->wr_rx_id_compl == lnk->wr_rx_id);
+}
+
 static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
 {
 wake_up_all(&lnk->wr_tx_wait);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 2f4d23238a7e3..9618e4429f0fe 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)

 static int map_get(u64 up_map, int i)
 {
- return (up_map & (1 << i)) >> i;
+ return (up_map & (1ULL << i)) >> i;
 }

 static struct tipc_peer *peer_prev(struct tipc_peer *peer)
diff --git a/security/security.c b/security/security.c
index 188b8f7822206..8b62654ff3f97 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2654,4 +2654,8 @@ int security_uring_sqpoll(void)
 {
 return call_int_hook(uring_sqpoll, 0);
 }
+int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return call_int_hook(uring_cmd, 0, ioucmd);
+}
 #endif /* CONFIG_IO_URING */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 1bbd53321d133..e90dfa36f79aa 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -91,6 +91,7 @@
 #include <uapi/linux/mount.h>
 #include <linux/fsnotify.h>
 #include <linux/fanotify.h>
+#include <linux/io_uring.h>

 #include "avc.h"
 #include "objsec.h"
@@ -6990,6 +6991,28 @@ static int selinux_uring_sqpoll(void)
 return avc_has_perm(&selinux_state, sid, sid,
 SECCLASS_IO_URING, IO_URING__SQPOLL, NULL);
 }
+
+/**
+ * selinux_uring_cmd - check if IORING_OP_URING_CMD is allowed
+ * @ioucmd: the io_uring command structure
+ *
+ * Check to see if the current domain is allowed to execute an
+ * IORING_OP_URING_CMD against the device/file specified in @ioucmd.
+ *
+ */
+static int selinux_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct inode *inode = file_inode(file);
+ struct inode_security_struct *isec = selinux_inode(inode);
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+
+ return avc_has_perm(&selinux_state, current_sid(), isec->sid,
+ SECCLASS_IO_URING, IO_URING__CMD, &ad);
+}
 #endif /* CONFIG_IO_URING */

 /*
@@ -7234,6 +7257,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
 #ifdef CONFIG_IO_URING
 LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds),
 LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd),
 #endif

 /*
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index ff757ae5f2537..1c2f41ff4e551 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -253,7 +253,7 @@ const struct security_class_mapping secclass_map[] = {
 { "anon_inode",
 { COMMON_FILE_PERMS, NULL } },
 { "io_uring",
- { "override_creds", "sqpoll", NULL } },
+ { "override_creds", "sqpoll", "cmd", NULL } },
 { NULL }
 };

diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 6207762dbdb13..b30e20f64471c 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -42,6 +42,7 @@
 #include <linux/fs_context.h>
 #include <linux/fs_parser.h>
 #include <linux/watch_queue.h>
+#include <linux/io_uring.h>
 #include "smack.h"

 #define TRANS_TRUE "TRUE"
@@ -4739,6 +4740,36 @@ static int smack_uring_sqpoll(void)
 return -EPERM;
 }

+/**
+ * smack_uring_cmd - check on file operations for io_uring
+ * @ioucmd: the command in question
+ *
+ * Make a best guess about whether a io_uring "command" should
+ * be allowed. Use the same logic used for determining if the
+ * file could be opened for read in the absence of better criteria.
+ */
+static int smack_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct smk_audit_info ad;
+ struct task_smack *tsp;
+ struct inode *inode;
+ int rc;
+
+ if (!file)
+ return -EINVAL;
+
+ tsp = smack_cred(file->f_cred);
+ inode = file_inode(file);
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
+
+ return rc;
+}
+
 #endif /* CONFIG_IO_URING */

 struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = {
@@ -4896,6 +4927,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
 #ifdef CONFIG_IO_URING
 LSM_HOOK_INIT(uring_override_creds, smack_uring_override_creds),
 LSM_HOOK_INIT(uring_sqpoll, smack_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, smack_uring_cmd),
 #endif
 };

diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 55b3c49ba61de..244afc38ddcaa 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -535,10 +535,13 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
 sg_dma_address(sgt->sgl));
 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
- if (p)
+ if (p) {
 dmab->private_data = sgt;
- else
+ /* store the first page address for convenience */
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
+ } else {
 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+ }
 return p;
 }

@@ -772,6 +775,8 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
 if (!p)
 goto error;
 dmab->private_data = sgbuf;
+ /* store the first page address for convenience */
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
 return p;

 error:
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 90c3a367d7de9..02df915eb3c66 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1672,14 +1672,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
 runtime = substream->runtime;
 if (atomic_read(&substream->mmap_count))
 goto __direct;
- err = snd_pcm_oss_make_ready(substream);
- if (err < 0)
- return err;
 atomic_inc(&runtime->oss.rw_ref);
 if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
 atomic_dec(&runtime->oss.rw_ref);
 return -ERESTARTSYS;
 }
+ err = snd_pcm_oss_make_ready_locked(substream);
+ if (err < 0)
+ goto unlock;
 format = snd_pcm_oss_format_from(runtime->oss.format);
 width = snd_pcm_format_physical_width(format);
 if (runtime->oss.buffer_used > 0) {
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 9b4a7cdb103ad..12f12a294df5a 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -605,17 +605,18 @@ static unsigned int loopback_jiffies_timer_pos_update
 cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
 struct loopback_pcm *dpcm_capt =
 cable->streams[SNDRV_PCM_STREAM_CAPTURE];
- unsigned long delta_play = 0, delta_capt = 0;
+ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
 unsigned int running, count1, count2;

+ cur_jiffies = jiffies;
 running = cable->running ^ cable->pause;
 if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
- delta_play = jiffies - dpcm_play->last_jiffies;
+ delta_play = cur_jiffies - dpcm_play->last_jiffies;
 dpcm_play->last_jiffies += delta_play;
 }

 if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
- delta_capt = jiffies - dpcm_capt->last_jiffies;
+ delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
 dpcm_capt->last_jiffies += delta_capt;
 }

diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index b2701a4452d86..48af77ae8020f 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -124,7 +124,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
 epcm->voices[0]->epcm = epcm;
 if (voices > 1) {
 for (i = 1; i < voices; i++) {
- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i];
+ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
 epcm->voices[i]->epcm = epcm;
 }
 }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a77165bd92a98..b20694fd69dea 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1817,7 +1817,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,

 /* use the non-cached pages in non-snoop mode */
 if (!azx_snoop(chip))
- azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC;
+ azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC_SG;

 if (chip->driver_type == AZX_DRIVER_NVIDIA) {
 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c
index d243800464352..bcca1cf3cd7b6 100644
--- a/sound/soc/atmel/mchp-spdiftx.c
+++ b/sound/soc/atmel/mchp-spdiftx.c
@@ -196,8 +196,7 @@ struct mchp_spdiftx_dev {
 struct clk *pclk;
 struct clk *gclk;
 unsigned int fmt;
- const struct mchp_i2s_caps *caps;
- int gclk_enabled:1;
+ unsigned int gclk_enabled:1;
 };

 static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev)
@@ -766,8 +765,6 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids);
 static int mchp_spdiftx_probe(struct platform_device *pdev)
 {
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
 struct mchp_spdiftx_dev *dev;
 struct resource *mem;
 struct regmap *regmap;
@@ -781,11 +778,6 @@ static int mchp_spdiftx_probe(struct platform_device *pdev)
 if (!dev)
 return -ENOMEM;

- /* Get hardware capabilities. */
- match = of_match_node(mchp_spdiftx_dt_ids, np);
- if (match)
- dev->caps = match->data;
-
 /* Map I/O registers. */
 base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
 if (IS_ERR(base))
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index 4fade23887972..8cba3015398b7 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -1618,7 +1618,6 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
 unsigned int current_plug_status;
 unsigned int current_button_status;
 unsigned int i;
- int report = 0;

 mutex_lock(&cs42l42->irq_lock);
 if (cs42l42->suspended) {
@@ -1713,13 +1712,15 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)

 if (current_button_status & CS42L42_M_DETECT_TF_MASK) {
 dev_dbg(cs42l42->dev, "Button released\n");
- report = 0;
+ snd_soc_jack_report(cs42l42->jack, 0,
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
 } else if (current_button_status & CS42L42_M_DETECT_FT_MASK) {
- report = cs42l42_handle_button_press(cs42l42);
-
+ snd_soc_jack_report(cs42l42->jack,
+ cs42l42_handle_button_press(cs42l42),
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
 }
- snd_soc_jack_report(cs42l42->jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3);
 }
 }

diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
index 6e1184c8b672a..c48ac107810d4 100644
--- a/sound/soc/qcom/sm8250.c
+++ b/sound/soc/qcom/sm8250.c
@@ -270,6 +270,7 @@ static int sm8250_platform_probe(struct platform_device *pdev)
 if (!card)
 return -ENOMEM;

+ card->owner = THIS_MODULE;
 /* Allocate the private data */
 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 if (!data)
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 4542868cd730f..39216c09f1597 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -196,6 +196,7 @@ config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE

 config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
 tristate "SOF enable IPC flood test"
+ depends on SND_SOC_SOF
 select SND_SOC_SOF_CLIENT
 help
 This option enables a separate client device for IPC flood test
@@ -214,6 +215,7 @@ config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM

 config SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR
 tristate "SOF enable IPC message injector"
+ depends on SND_SOC_SOF
 select SND_SOC_SOF_CLIENT
 help
 This option enables the IPC message injector which can be used to send
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d356743de2ff9..706d249a9ad6b 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -699,7 +699,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
 if (delayed_register[i] &&
 sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
 id == chip->usb_id)
- return inum != iface;
+ return iface < inum;
 }

 return false;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index f9c921683948d..ff2aa13b7b26f 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -758,7 +758,8 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
 * The endpoint needs to be closed via snd_usb_endpoint_close() later.
 *
 * Note that this function doesn't configure the endpoint. The substream
- * needs to set it up later via snd_usb_endpoint_configure().
+ * needs to set it up later via snd_usb_endpoint_set_params() and
+ * snd_usb_endpoint_prepare().
 */
 struct snd_usb_endpoint *
 snd_usb_endpoint_open(struct snd_usb_audio *chip,
@@ -924,6 +925,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
 endpoint_set_interface(chip, ep, false);

 if (!--ep->opened) {
+ if (ep->clock_ref && !atomic_read(&ep->clock_ref->locked))
+ ep->clock_ref->rate = 0;
 ep->iface = 0;
 ep->altsetting = 0;
 ep->cur_audiofmt = NULL;
@@ -1290,12 +1293,13 @@ out_of_memory:
 /*
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
+ * It's called either from hw_params callback.
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running can not be reconfigured.
 */
-static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep)
 {
 const struct audioformat *fmt = ep->cur_audiofmt;
 int err;
@@ -1378,18 +1382,18 @@ static int init_sample_rate(struct snd_usb_audio *chip,
 }

 /*
- * snd_usb_endpoint_configure: Configure the endpoint
+ * snd_usb_endpoint_prepare: Prepare the endpoint
 *
 * This function sets up the EP to be fully usable state.
- * It's called either from hw_params or prepare callback.
+ * It's called either from prepare callback.
 * The function checks need_setup flag, and performs nothing unless needed,
 * so it's safe to call this multiple times.
 *
 * This returns zero if unchanged, 1 if the configuration has changed,
 * or a negative error code.
 */
-int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep)
 {
 bool iface_first;
 int err = 0;
@@ -1410,9 +1414,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
 if (err < 0)
 goto unlock;
 }
- err = snd_usb_endpoint_set_params(chip, ep);
- if (err < 0)
- goto unlock;
 goto done;
 }

@@ -1440,10 +1441,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
 if (err < 0)
 goto unlock;

- err = snd_usb_endpoint_set_params(chip, ep);
- if (err < 0)
- goto unlock;
-
 err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
 if (err < 0)
 goto unlock;
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 6a9af04cf175a..e67ea28faa54f 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -17,8 +17,10 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
 bool is_sync_ep);
 void snd_usb_endpoint_close(struct snd_usb_audio *chip,
 struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);

 bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index e692ae04436a5..02035b545f9dd 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
 if (stop_endpoints(subs, false))
 sync_pending_stops(subs);
 if (subs->sync_endpoint) {
- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
 if (err < 0)
 return err;
 }
- err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
+ err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
 if (err < 0)
 return err;
 snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
 } else {
 if (subs->sync_endpoint) {
- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
 if (err < 0)
 return err;
 }
@@ -551,7 +551,13 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
 subs->cur_audiofmt = fmt;
 mutex_unlock(&chip->mutex);

- ret = configure_endpoints(chip, subs);
+ if (subs->sync_endpoint) {
+ ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);

 unlock:
 if (ret < 0)
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9bfead5efc4c1..5b4d8f5eade20 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1764,7 +1764,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)

 for (q = registration_quirks; q->usb_id; q++)
 if (chip->usb_id == q->usb_id)
- return iface != q->interface;
+ return iface < q->interface;

 /* Register as normal */
 return false;
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index ceb93d798182c..f10f4e6d3fb85 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -495,6 +495,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
 return 0;
 }
 }
+
+ if (chip->card->registered)
+ chip->need_delayed_register = true;
+
 /* look for an empty stream */
 list_for_each_entry(as, &chip->pcm_list, list) {
 if (as->fmt_type != fp->fmt_type)
@@ -502,9 +506,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
 subs = &as->substream[stream];
 if (subs->ep_num)
 continue;
- if (snd_device_get_state(chip->card, as->pcm) !=
- SNDRV_DEV_BUILD)
- chip->need_delayed_register = true;
 err = snd_pcm_new_stream(as->pcm, stream, 1);
 if (err < 0)
 return err;
@@ -1105,7 +1106,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
 * Dallas DS4201 workaround: It presents 5 altsettings, but the last
 * one misses syncpipe, and does not produce any sound.
 */
- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
+ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
 num = 4;

 for (i = 0; i < num; i++) {
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index e6c98a6e3908e..6b1bafe267a42 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -486,6 +486,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 if (ops->idx)
 ops->idx(evlist, evsel, mp, idx);

+ pr_debug("idx %d: mmapping fd %d\n", idx, *output);
 if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
 return -1;

@@ -494,6 +495,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 if (!idx)
 perf_evlist__set_mmap_first(evlist, map, overwrite);
 } else {
+ pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 return -1;

@@ -519,6 +521,48 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 return 0;
 }

+static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int cpu, thread, idx = 0;
+ int nr_mmaps = 0;
+
+ pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
+ __func__, nr_cpus, nr_threads);
+
+ /* per-thread mmaps */
+ for (thread = 0; thread < nr_threads; thread++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ /* system-wide mmaps i.e. per-cpu */
+ for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
+ return 0;
+
+out_unmap:
+ perf_evlist__munmap(evlist);
+ return -1;
+}
+
 static int
 mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 struct perf_mmap_param *mp)
@@ -528,6 +572,8 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 int nr_mmaps = 0;
 int cpu, thread;

+ pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);
+
 for (cpu = 0; cpu < nr_cpus; cpu++) {
 int output = -1;
 int output_overwrite = -1;
@@ -569,6 +615,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 struct perf_evlist_mmap_ops *ops,
 struct perf_mmap_param *mp)
 {
+ const struct perf_cpu_map *cpus = evlist->all_cpus;
 struct perf_evsel *evsel;

 if (!ops || !ops->get || !ops->mmap)
@@ -588,6 +635,9 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
 return -ENOMEM;

+ if (perf_cpu_map__empty(cpus))
+ return mmap_per_thread(evlist, ops, mp);
+
 return mmap_per_cpu(evlist, ops, mp);
 }

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 31c719f99f66e..5d87e0b0d85f9 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -162,32 +162,34 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,

 /*
 * Unfortunately these have to be hard coded because the noreturn
- * attribute isn't provided in ELF data.
+ * attribute isn't provided in ELF data. Keep 'em sorted.
 */
 static const char * const global_noreturns[] = {
+ "__invalid_creds",
+ "__module_put_and_kthread_exit",
+ "__reiserfs_panic",
 "__stack_chk_fail",
- "panic",
+ "__ubsan_handle_builtin_unreachable",
+ "cpu_bringup_and_idle",
+ "cpu_startup_entry",
 "do_exit",
+ "do_group_exit",
 "do_task_dead",
- "kthread_exit",
- "make_task_dead",
- "__module_put_and_kthread_exit",
+ "ex_handler_msr_mce",
+ "fortify_panic",
 "kthread_complete_and_exit",
- "__reiserfs_panic",
+ "kthread_exit",
+ "kunit_try_catch_throw",
 "lbug_with_loc",
- "fortify_panic",
- "usercopy_abort",
 "machine_real_restart",
+ "make_task_dead",
+ "panic",
 "rewind_stack_and_make_dead",
- "kunit_try_catch_throw",
- "xen_start_kernel",
- "cpu_bringup_and_idle",
- "do_group_exit",
+ "sev_es_terminate",
+ "snp_abort",
 "stop_this_cpu",
- "__invalid_creds",
- "cpu_startup_entry",
- "__ubsan_handle_builtin_unreachable",
- "ex_handler_msr_mce",
+ "usercopy_abort",
+ "xen_start_kernel",
 };

 if (!func)
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index 68f681ad54c1e..777bdf182a582 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -8,8 +8,13 @@
 #define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
 #define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"

-int arch_evlist__add_default_attrs(struct evlist *evlist)
+int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
 {
+ if (nr_attrs)
+ return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
+
 if (!pmu_have_event("cpu", "slots"))
 return 0;

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 9a71f0330137e..68c878b4e5e4c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1892,14 +1892,18 @@ static int record__synthesize(struct record *rec, bool tail)

 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
 machine, opts);
- if (err < 0)
+ if (err < 0) {
 pr_warning("Couldn't synthesize bpf events.\n");
+ err = 0;
+ }

 if (rec->opts.synth & PERF_SYNTH_CGROUP) {
 err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
 machine);
- if (err < 0)
+ if (err < 0) {
 pr_warning("Couldn't synthesize cgroup events.\n");
+ err = 0;
+ }
 }

 if (rec->opts.nr_threads_synthesize > 1) {
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index c689054002cca..26a572c160d6f 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -441,6 +441,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
 struct perf_event_attr *attr = &evsel->core.attr;
 bool allow_user_set;

+ if (evsel__is_dummy_event(evsel))
+ return 0;
+
 if (perf_header__has_feat(&session->header, HEADER_STAT))
 return 0;

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5f0333a8acd8a..82e14faecc3e4 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1778,6 +1778,9 @@ static int add_default_attributes(void)
 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
 };
+
+ struct perf_event_attr default_null_attrs[] = {};
+
 /* Set attrs if no event is selected and !null_run: */
 if (stat_config.null_run)
 return 0;
@@ -1941,6 +1944,9 @@ setup_metrics:
 free(str);
 }

+ if (!stat_config.topdown_level)
+ stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
+
 if (!evsel_list->core.nr_entries) {
 if (target__has_cpu(&target))
 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -1957,9 +1963,8 @@ setup_metrics:
 }
 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
 return -1;
-
- stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
- if (arch_evlist__add_default_attrs(evsel_list) < 0)
+ /* Platform specific attrs */
+ if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
 return -1;
 }

diff --git a/tools/perf/dlfilters/dlfilter-show-cycles.c b/tools/perf/dlfilters/dlfilter-show-cycles.c
index 9eccc97bff82f..6d47298ebe9f6 100644
--- a/tools/perf/dlfilters/dlfilter-show-cycles.c
+++ b/tools/perf/dlfilters/dlfilter-show-cycles.c
@@ -98,9 +98,9 @@ int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, vo
 static void print_vals(__u64 cycles, __u64 delta)
 {
 if (delta)
- printf("%10llu %10llu ", cycles, delta);
+ printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);
 else
- printf("%10llu %10s ", cycles, "");
+ printf("%10llu %10s ", (unsigned long long)cycles, "");
 }

 int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 48af7d379d822..efa5f006b5c61 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -342,9 +342,14 @@ int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *a
 return evlist__add_attrs(evlist, attrs, nr_attrs);
 }

-__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
+__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
 {
- return 0;
+ if (!nr_attrs)
+ return 0;
+
+ return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
 }

 struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1bde9ccf4e7da..129095c0fe6d3 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -107,10 +107,13 @@ static inline int evlist__add_default(struct evlist *evlist)
 int __evlist__add_default_attrs(struct evlist *evlist,
 struct perf_event_attr *attrs, size_t nr_attrs);

+int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs);
+
 #define evlist__add_default_attrs(evlist, array) \
- __evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+ arch_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

-int arch_evlist__add_default_attrs(struct evlist *evlist);
 struct evsel *arch_evlist__leader(struct list_head *list);

 int evlist__add_dummy(struct evlist *evlist);