From 9bd618683851d3f82a575305321f69423c16c93c Mon Sep 17 00:00:00 2001
From: Gerben Jan Dijkman
Date: Sun, 8 Jan 2023 16:22:21 +0100
Subject: [PATCH] Version bump

---
 sys-kernel/pinephone-sources/Manifest | 2 +-
 ...-dts-pinephone-pro-remove-modem-node.patch | 11 +-
 ...ent-clocks-to-lower-speed-oscillator.patch | 50 -
 ...104-PPP-Add-reset-resume-to-usb_wwan.patch | 21 -
 ...l-org-bug-210681-firmware_rome_error.patch | 12 -
 ...pio-make-max_brightness-configurable.patch | 49 -
 .../files/0106-panic-led.patch | 12 -
 ...hip-i2s-Dont-disable-mclk-on-suspend.patch | 29 -
 .../files/1003_linux-6.1.4.patch | 8792 +++++++++++++++
 .../files/1500_XATTR_USER_PREFIX.patch | 33 +-
 ...ink-security-restrictions-by-default.patch | 17 -
 ...p-mark-get-entry-ip-as--maybe-unused.patch | 11 +
 .../files/4567_distro-Gentoo-Kconfig.patch | 21 +-
 .../pinephone-sources/files/5.19.10-11.patch | 1231 ---
 .../pinephone-sources/files/5.19.11-12.patch | 9776 -----------------
 .../pinephone-sources/files/5.19.8-9.patch | 8234 --------------
 .../pinephone-sources/files/5.19.9-10.patch | 1723 ---
 ...0_enable-cpu-optimizations-universal.patch | 217 +-
 ...20_BMQ-and-PDS-io-scheduler-v6.1-r0.patch} | 508 +-
 .../5021_BMQ-and-PDS-gentoo-defaults.patch | 13 -
 ...hed-alt-missing-rq-lock-irq-function.patch | 30 +
 sys-kernel/pinephone-sources/files/config-ppp | 486 +-
 ....ebuild => pinephone-sources-6.1.4.ebuild} | 28 +-
 23 files changed, 9639 insertions(+), 21667 deletions(-)
 delete mode 100644 sys-kernel/pinephone-sources/files/0103-ccu-sun50i-a64-reparent-clocks-to-lower-speed-oscillator.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/0104-PPP-Add-reset-resume-to-usb_wwan.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/0104-quirk-kernel-org-bug-210681-firmware_rome_error.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/0105-leds-gpio-make-max_brightness-configurable.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/0106-panic-led.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/0106-sound-rockchip-i2s-Dont-disable-mclk-on-suspend.patch
 create mode 100644 sys-kernel/pinephone-sources/files/1003_linux-6.1.4.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch
 create mode 100644 sys-kernel/pinephone-sources/files/2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/5.19.10-11.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/5.19.11-12.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/5.19.8-9.patch
 delete mode 100644 sys-kernel/pinephone-sources/files/5.19.9-10.patch
 rename sys-kernel/pinephone-sources/files/{5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch => 5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch} (95%)
 delete mode 100644 sys-kernel/pinephone-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch
 create mode 100644 sys-kernel/pinephone-sources/files/5021_sched-alt-missing-rq-lock-irq-function.patch
 rename sys-kernel/pinephone-sources/{pinephone-sources-5.19.12.ebuild => pinephone-sources-6.1.4.ebuild} (69%)

diff --git a/sys-kernel/pinephone-sources/Manifest b/sys-kernel/pinephone-sources/Manifest
index 91e1408..fc54382 100644
--- a/sys-kernel/pinephone-sources/Manifest
+++ b/sys-kernel/pinephone-sources/Manifest
@@ -1 +1 @@
-DIST orange-pi-5.19-20220909-1622.tar.gz 215047997 BLAKE2B 8d9b57d5e4c52e08caf97749912ba14eff7b328eb8fa6e00ba5a7f3bf47b4064c1272162602fdbda9852eea6f7473033c01b491ef09ca6a9aa3ee0f1375145ac SHA512 
c2d085522c0332d6b95dde22af92c7c2a8941f94714d9d2c83249d4ddd921fe0a85226b8a09715ca37dfe0874315dd97d0d4c5511f8fe315cb29a9fef99a1109 +DIST orange-pi-6.1-20230104-1712.tar.gz 223562139 BLAKE2B 635525429db3599691dba21ac86f40492f0cb2d5060bbfcc32cd789ac2379593008b09de0dc40a189481e4e033404546aad6bbd9b32214a09c390f738d4410cf SHA512 4795a261e1016c9d5d4c11cfee43d8cd020b2c2cfed50b87840dba28b8aaee236e533c6eef582ccbbdef65fc36dd7381189c2b0bc0da0922f1282ade8252d75f diff --git a/sys-kernel/pinephone-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch b/sys-kernel/pinephone-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch index 24be3b4..24bde67 100644 --- a/sys-kernel/pinephone-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch +++ b/sys-kernel/pinephone-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch @@ -13,7 +13,7 @@ diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/ index 61c990764..13141c643 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts -@@ -326,6 +326,7 @@ vcc_4g_5v: vcc-4g-5v { +@@ -413,6 +413,7 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; vin-supply = <&vcc5v0_sys>; @@ -21,15 +21,15 @@ index 61c990764..13141c643 100644 }; vcc_4g: vcc-4g { -@@ -338,6 +339,7 @@ vcc_4g: vcc-4g { +@@ -425,6 +426,7 @@ regulator-min-microvolt = <3800000>; regulator-max-microvolt = <3800000>; - vin-supply = <&vcc_sysin>; + vin-supply = <&vcc_sys>; + regulator-always-on; }; vcc1v8_codec: vcc1v8-codec-regulator { -@@ -1058,31 +1060,6 @@ mipi_in_panel: endpoint { +@@ -1187,31 +1189,6 @@ &uart3 { status = "okay"; @@ -61,7 +61,7 @@ index 61c990764..13141c643 100644 }; &pmu_io_domains { -@@ -1153,19 +1130,6 @@ vcc_4g_5v_en: vcc-4g-5v-en-pin { +@@ -1292,19 +1269,6 @@ vcc_4g_en: vcc-4g-en-pin { rockchip,pins = <4 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>; }; @@ -83,4 +83,3 @@ index 61c990764..13141c643 100644 pmic { -- 2.34.1 - diff --git a/sys-kernel/pinephone-sources/files/0103-ccu-sun50i-a64-reparent-clocks-to-lower-speed-oscillator.patch b/sys-kernel/pinephone-sources/files/0103-ccu-sun50i-a64-reparent-clocks-to-lower-speed-oscillator.patch deleted file mode 100644 index 2a16042..0000000 --- a/sys-kernel/pinephone-sources/files/0103-ccu-sun50i-a64-reparent-clocks-to-lower-speed-oscillator.patch +++ /dev/null @@ -1,50 +0,0 @@ -diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c -index 149cfde817cb..0399d8714fd0 100644 ---- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c -+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c -@@ -984,6 +984,8 @@ static int sun50i_a64_ccu_probe(struct p - if (IS_ERR(reg)) - return PTR_ERR(reg); - -+ platform_set_drvdata(pdev, reg); -+ - /* Force the pll-audio variable divider to 3 */ - val = readl(reg + SUN50I_A64_PLL_AUDIO_REG); - val &= ~GENMASK(19, 16); -@@ -1031,12 +1031,36 @@ static const struct of_device_id sun50i_ - { } - }; - -+#define USBPHY_CFG_REG 0x0cc -+ -+static int sun50i_a64_ccu_suspend(struct device *dev) -+{ -+ void __iomem *reg = dev_get_drvdata(dev); -+ -+ writel(readl(reg + USBPHY_CFG_REG) | 0xa00000, reg + USBPHY_CFG_REG); -+ -+ return 0; -+} -+ -+static int sun50i_a64_ccu_resume(struct device *dev) -+{ -+ void __iomem *reg = dev_get_drvdata(dev); -+ -+ writel(readl(reg + USBPHY_CFG_REG) & ~0xa00000, reg + USBPHY_CFG_REG); -+ -+ return 0; -+} -+ -+static SIMPLE_DEV_PM_OPS(sun50i_a64_ccu_pm_ops, -+ sun50i_a64_ccu_suspend, sun50i_a64_ccu_resume); -+ - 
static struct platform_driver sun50i_a64_ccu_driver = { - .probe = sun50i_a64_ccu_probe, - .driver = { - .name = "sun50i-a64-ccu", - .suppress_bind_attrs = true, - .of_match_table = sun50i_a64_ccu_ids, -+ .pm = &sun50i_a64_ccu_pm_ops, - }, - }; - builtin_platform_driver(sun50i_a64_ccu_driver); diff --git a/sys-kernel/pinephone-sources/files/0104-PPP-Add-reset-resume-to-usb_wwan.patch b/sys-kernel/pinephone-sources/files/0104-PPP-Add-reset-resume-to-usb_wwan.patch deleted file mode 100644 index be8499f..0000000 --- a/sys-kernel/pinephone-sources/files/0104-PPP-Add-reset-resume-to-usb_wwan.patch +++ /dev/null @@ -1,21 +0,0 @@ -From 94ee175a91b2c132ca3068ee04cb2766c9f47cd7 Mon Sep 17 00:00:00 2001 -From: Hendrik Borghorst -Date: Fri, 10 Jun 2022 15:36:29 +0200 -Subject: [PATCH] PPP: Add reset resume to usb_wwan - ---- - drivers/usb/serial/option.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c -index e60425bbf53764..08fb844c534bf6 100644 ---- a/drivers/usb/serial/option.c -+++ b/drivers/usb/serial/option.c -@@ -2176,6 +2176,7 @@ static struct usb_serial_driver option_1port_device = { - #ifdef CONFIG_PM - .suspend = usb_wwan_suspend, - .resume = usb_wwan_resume, -+ .reset_resume = usb_wwan_resume, - #endif - }; - diff --git a/sys-kernel/pinephone-sources/files/0104-quirk-kernel-org-bug-210681-firmware_rome_error.patch b/sys-kernel/pinephone-sources/files/0104-quirk-kernel-org-bug-210681-firmware_rome_error.patch deleted file mode 100644 index 7f46da7..0000000 --- a/sys-kernel/pinephone-sources/files/0104-quirk-kernel-org-bug-210681-firmware_rome_error.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c -index 03b83aa91277..dfc6c7d1b0e7 100644 ---- a/drivers/bluetooth/btusb.c -+++ b/drivers/bluetooth/btusb.c -@@ -4070,6 +4070,7 @@ static int btusb_setup_qca(struct hci_dev *hdev) - } - if (!info) { - bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom); -+ if (ver_rom & ~0xffffU) return 0; - return -ENODEV; - } - diff --git a/sys-kernel/pinephone-sources/files/0105-leds-gpio-make-max_brightness-configurable.patch b/sys-kernel/pinephone-sources/files/0105-leds-gpio-make-max_brightness-configurable.patch deleted file mode 100644 index e844fce..0000000 --- a/sys-kernel/pinephone-sources/files/0105-leds-gpio-make-max_brightness-configurable.patch +++ /dev/null @@ -1,49 +0,0 @@ -From cb408fb65a08bd45543724c1e9b8f38ae1bebc4a Mon Sep 17 00:00:00 2001 -From: Arnaud Ferraris -Date: Tue, 4 Aug 2020 15:12:59 +0200 -Subject: [PATCH 177/183] leds-gpio: make max_brightness configurable - ---- - drivers/leds/leds-gpio.c | 4 ++++ - include/linux/leds.h | 3 ++- - 2 files changed, 6 insertions(+), 1 deletion(-) - -diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c -index 93f5b1b60fde..f8483fab1164 100644 ---- a/drivers/leds/leds-gpio.c -+++ b/drivers/leds/leds-gpio.c -@@ -108,6 +108,8 @@ static int create_gpio_led(const struct gpio_led *template, - if (ret < 0) - return ret; - -+ led_dat->cdev.max_brightness = template->max_brightness; -+ - if (template->name) { - led_dat->cdev.name = template->name; - ret = devm_led_classdev_register(parent, &led_dat->cdev); -@@ -177,6 +179,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) - if (fwnode_property_present(child, "panic-indicator")) - led.panic_indicator = 1; - -+ fwnode_property_read_u32(child, "max-brightness", &led.max_brightness); -+ - ret = create_gpio_led(&led, led_dat, dev, child, NULL); 
- if (ret < 0) { - fwnode_handle_put(child); -diff --git a/include/linux/leds.h b/include/linux/leds.h -index 6a8d6409c993..99a80092114d 100644 ---- a/include/linux/leds.h -+++ b/include/linux/leds.h -@@ -513,7 +513,8 @@ typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state, - struct gpio_led { - const char *name; - const char *default_trigger; -- unsigned gpio; -+ unsigned gpio; -+ unsigned max_brightness; - unsigned active_low : 1; - unsigned retain_state_suspended : 1; - unsigned panic_indicator : 1; --- -2.30.0 - diff --git a/sys-kernel/pinephone-sources/files/0106-panic-led.patch b/sys-kernel/pinephone-sources/files/0106-panic-led.patch deleted file mode 100644 index 2bb4843..0000000 --- a/sys-kernel/pinephone-sources/files/0106-panic-led.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi -index 1c555456b..05fab5d79 100644 ---- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi -+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi -@@ -78,6 +78,7 @@ green { - }; - - led-2 { -+ linux,default-trigger = "panic"; - function = LED_FUNCTION_INDICATOR; - color = ; - gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */ diff --git a/sys-kernel/pinephone-sources/files/0106-sound-rockchip-i2s-Dont-disable-mclk-on-suspend.patch b/sys-kernel/pinephone-sources/files/0106-sound-rockchip-i2s-Dont-disable-mclk-on-suspend.patch deleted file mode 100644 index 04cc463..0000000 --- a/sys-kernel/pinephone-sources/files/0106-sound-rockchip-i2s-Dont-disable-mclk-on-suspend.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 5f41055235786657509233557a3ca2950c401ec5 Mon Sep 17 00:00:00 2001 -From: marcin -Date: Wed, 15 Jun 2022 03:46:13 +0200 -Subject: [PATCH] sound/rockchip/i2s: Don't disable mclk on suspend - -This is a workaround to fix an issue with high-pitch sound after -suspend. - -This patch is actually authored by Biktorgj ---- - sound/soc/rockchip/rockchip_i2s.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c -index 4ce5d2579387..1f9d4e5e36b2 100644 ---- a/sound/soc/rockchip/rockchip_i2s.c -+++ b/sound/soc/rockchip/rockchip_i2s.c -@@ -61,7 +61,7 @@ static int i2s_runtime_suspend(struct device *dev) - struct rk_i2s_dev *i2s = dev_get_drvdata(dev); - - regcache_cache_only(i2s->regmap, true); -- clk_disable_unprepare(i2s->mclk); -+ //clk_disable_unprepare(i2s->mclk); - - return 0; - } --- -GitLab - diff --git a/sys-kernel/pinephone-sources/files/1003_linux-6.1.4.patch b/sys-kernel/pinephone-sources/files/1003_linux-6.1.4.patch new file mode 100644 index 0000000..894610d --- /dev/null +++ b/sys-kernel/pinephone-sources/files/1003_linux-6.1.4.patch @@ -0,0 +1,8792 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 42af9ca0127e5..6b838869554b1 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2300,7 +2300,13 @@ + Provide an override to the IOAPIC-ID<->DEVICE-ID + mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. 
+- For example: ++ ++ For example, to map IOAPIC-ID decimal 10 to ++ PCI segment 0x1 and PCI device 00:14.0, ++ write the parameter as: ++ ivrs_ioapic=10@0001:00:14.0 ++ ++ Deprecated formats: + * To map IOAPIC-ID decimal 10 to PCI device 00:14.0 + write the parameter as: + ivrs_ioapic[10]=00:14.0 +@@ -2312,7 +2318,13 @@ + Provide an override to the HPET-ID<->DEVICE-ID + mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. +- For example: ++ ++ For example, to map HPET-ID decimal 10 to ++ PCI segment 0x1 and PCI device 00:14.0, ++ write the parameter as: ++ ivrs_hpet=10@0001:00:14.0 ++ ++ Deprecated formats: + * To map HPET-ID decimal 0 to PCI device 00:14.0 + write the parameter as: + ivrs_hpet[0]=00:14.0 +@@ -2323,15 +2335,20 @@ + ivrs_acpihid [HW,X86-64] + Provide an override to the ACPI-HID:UID<->DEVICE-ID + mapping provided in the IVRS ACPI table. ++ By default, PCI segment is 0, and can be omitted. + + For example, to map UART-HID:UID AMD0020:0 to + PCI segment 0x1 and PCI device ID 00:14.5, + write the parameter as: +- ivrs_acpihid[0001:00:14.5]=AMD0020:0 ++ ivrs_acpihid=AMD0020:0@0001:00:14.5 + +- By default, PCI segment is 0, and can be omitted. +- For example, PCI device 00:14.5 write the parameter as: ++ Deprecated formats: ++ * To map UART-HID:UID AMD0020:0 to PCI segment is 0, ++ PCI device ID 00:14.5, write the parameter as: + ivrs_acpihid[00:14.5]=AMD0020:0 ++ * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and ++ PCI device ID 00:14.5, write the parameter as: ++ ivrs_acpihid[0001:00:14.5]=AMD0020:0 + + js= [HW,JOY] Analog joystick + See Documentation/input/joydev/joystick.rst. +diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst +index eb358a00be279..1d16787a00e95 100644 +--- a/Documentation/filesystems/mount_api.rst ++++ b/Documentation/filesystems/mount_api.rst +@@ -814,6 +814,7 @@ process the parameters it is given. + int fs_lookup_param(struct fs_context *fc, + struct fs_parameter *value, + bool want_bdev, ++ unsigned int flags, + struct path *_path); + + This takes a parameter that carries a string or filename type and attempts +diff --git a/Makefile b/Makefile +index a69d14983a489..56afd1509c74f 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 1 +-SUBLEVEL = 3 ++SUBLEVEL = 4 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile +index 303400fa2cdf7..2aec85ab1e8b9 100644 +--- a/arch/arm/nwfpe/Makefile ++++ b/arch/arm/nwfpe/Makefile +@@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \ + entry.o + + nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o ++ ++# Try really hard to avoid generating calls to __aeabi_uldivmod() from ++# float64_rem() due to loop elision. 
++ifdef CONFIG_CC_IS_CLANG ++CFLAGS_softfloat.o += -mllvm -replexitval=never ++endif +diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +index 4fbd99eb496a2..dec85d2548384 100644 +--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts ++++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +@@ -56,10 +56,10 @@ + #size-cells = <2>; + ranges; + +- /* 192 KiB reserved for ARM Trusted Firmware (BL31) */ ++ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ + bl31_secmon_reserved: secmon@54600000 { + no-map; +- reg = <0 0x54600000 0x0 0x30000>; ++ reg = <0 0x54600000 0x0 0x200000>; + }; + + /* 12 MiB reserved for OP-TEE (BL32) +diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +index 212d63d5cbf28..9f2a136d5cbc5 100644 +--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi ++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +@@ -855,12 +855,13 @@ + required-opps = <&rpmhpd_opp_nom>; + + iommus = <&apps_smmu 0xe0 0x0>; ++ dma-coherent; + + clocks = <&gcc GCC_UFS_PHY_AXI_CLK>, + <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>, + <&gcc GCC_UFS_PHY_AHB_CLK>, + <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>, +- <&rpmhcc RPMH_CXO_CLK>, ++ <&gcc GCC_UFS_REF_CLKREF_CLK>, + <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>; +@@ -891,7 +892,7 @@ + ranges; + clock-names = "ref", + "ref_aux"; +- clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, ++ clocks = <&gcc GCC_UFS_CARD_CLKREF_CLK>, + <&gcc GCC_UFS_PHY_PHY_AUX_CLK>; + + resets = <&ufs_mem_hc 0>; +@@ -923,12 +924,13 @@ + power-domains = <&gcc UFS_CARD_GDSC>; + + iommus = <&apps_smmu 0x4a0 0x0>; ++ dma-coherent; + + clocks = <&gcc GCC_UFS_CARD_AXI_CLK>, + <&gcc GCC_AGGRE_UFS_CARD_AXI_CLK>, + <&gcc GCC_UFS_CARD_AHB_CLK>, + <&gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>, +- <&rpmhcc RPMH_CXO_CLK>, ++ <&gcc GCC_UFS_REF_CLKREF_CLK>, + <&gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_CARD_RX_SYMBOL_1_CLK>; +@@ -959,7 +961,7 @@ + ranges; + clock-names = "ref", + "ref_aux"; +- clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, ++ clocks = <&gcc GCC_UFS_1_CARD_CLKREF_CLK>, + <&gcc GCC_UFS_CARD_PHY_AUX_CLK>; + + resets = <&ufs_card_hc 0>; +diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +index 132417e2d11e5..a3e15dedd60cb 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts ++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +@@ -1123,7 +1123,10 @@ + + /* PINCTRL - additions to nodes defined in sdm845.dtsi */ + &qup_spi2_default { +- drive-strength = <16>; ++ pinconf { ++ pins = "gpio27", "gpio28", "gpio29", "gpio30"; ++ drive-strength = <16>; ++ }; + }; + + &qup_uart3_default{ +diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +index be59a8ba9c1fe..74f43da51fa50 100644 +--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts ++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +@@ -487,8 +487,10 @@ + }; + + &qup_i2c12_default { +- drive-strength = <2>; +- bias-disable; ++ pinmux { ++ drive-strength = <2>; ++ bias-disable; ++ }; + }; + + &qup_uart6_default { +diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts +index f954fe5cb61ab..d028a7eb364a6 100644 +--- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts ++++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts +@@ -415,8 +415,10 @@ + }; + + &qup_i2c12_default { +- drive-strength = <2>; +- 
bias-disable; ++ pinmux { ++ drive-strength = <2>; ++ bias-disable; ++ }; + }; + + &qup_uart6_default { +diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c +index 634279b3b03d1..117e2c180f3c7 100644 +--- a/arch/arm64/kernel/stacktrace.c ++++ b/arch/arm64/kernel/stacktrace.c +@@ -23,8 +23,8 @@ + * + * The regs must be on a stack currently owned by the calling task. + */ +-static inline void unwind_init_from_regs(struct unwind_state *state, +- struct pt_regs *regs) ++static __always_inline void unwind_init_from_regs(struct unwind_state *state, ++ struct pt_regs *regs) + { + unwind_init_common(state, current); + +@@ -58,8 +58,8 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state) + * duration of the unwind, or the unwind will be bogus. It is never valid to + * call this for the current task. + */ +-static inline void unwind_init_from_task(struct unwind_state *state, +- struct task_struct *task) ++static __always_inline void unwind_init_from_task(struct unwind_state *state, ++ struct task_struct *task) + { + unwind_init_common(state, task); + +@@ -186,7 +186,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) + : stackinfo_get_unknown(); \ + }) + +-noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, ++noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task, + struct pt_regs *regs) + { +diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h +index ecd0288544698..68ae77069d23f 100644 +--- a/arch/parisc/include/asm/pgtable.h ++++ b/arch/parisc/include/asm/pgtable.h +@@ -166,8 +166,8 @@ extern void __update_cache(pte_t pte); + + /* This calculates the number of initial pages we need for the initial + * page tables */ +-#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT) +-# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) ++#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE) ++# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE)) + #else + # define PT_INITIAL (1) /* all initial PTEs fit into one page */ + #endif +diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c +index 6a7e315bcc2e5..a115315d88e69 100644 +--- a/arch/parisc/kernel/firmware.c ++++ b/arch/parisc/kernel/firmware.c +@@ -1288,9 +1288,8 @@ void pdc_io_reset_devices(void) + + #endif /* defined(BOOTLOADER) */ + +-/* locked by pdc_console_lock */ +-static int __attribute__((aligned(8))) iodc_retbuf[32]; +-static char __attribute__((aligned(64))) iodc_dbuf[4096]; ++/* locked by pdc_lock */ ++static char iodc_dbuf[4096] __page_aligned_bss; + + /** + * pdc_iodc_print - Console print using IODC. 
+@@ -1307,6 +1306,9 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) + unsigned int i; + unsigned long flags; + ++ count = min_t(unsigned int, count, sizeof(iodc_dbuf)); ++ ++ spin_lock_irqsave(&pdc_lock, flags); + for (i = 0; i < count;) { + switch(str[i]) { + case '\n': +@@ -1322,12 +1324,11 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) + } + + print: +- spin_lock_irqsave(&pdc_lock, flags); +- real32_call(PAGE0->mem_cons.iodc_io, +- (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, +- PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), +- __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); +- spin_unlock_irqrestore(&pdc_lock, flags); ++ real32_call(PAGE0->mem_cons.iodc_io, ++ (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, ++ PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), ++ __pa(pdc_result), 0, __pa(iodc_dbuf), i, 0); ++ spin_unlock_irqrestore(&pdc_lock, flags); + + return i; + } +@@ -1354,10 +1355,11 @@ int pdc_iodc_getc(void) + real32_call(PAGE0->mem_kbd.iodc_io, + (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN, + PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers), +- __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0); ++ __pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0); + + ch = *iodc_dbuf; +- status = *iodc_retbuf; ++ /* like convert_to_wide() but for first return value only: */ ++ status = *(int *)&pdc_result; + spin_unlock_irqrestore(&pdc_lock, flags); + + if (status == 0) +diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c +index ab7620f695be1..b16fa9bac5f44 100644 +--- a/arch/parisc/kernel/kgdb.c ++++ b/arch/parisc/kernel/kgdb.c +@@ -208,23 +208,3 @@ int kgdb_arch_handle_exception(int trap, int signo, + } + return -1; + } +- +-/* KGDB console driver which uses PDC to read chars from keyboard */ +- +-static void kgdb_pdc_write_char(u8 chr) +-{ +- /* no need to print char. kgdb will do it. */ +-} +- +-static struct kgdb_io kgdb_pdc_io_ops = { +- .name = "kgdb_pdc", +- .read_char = pdc_iodc_getc, +- .write_char = kgdb_pdc_write_char, +-}; +- +-static int __init kgdb_pdc_init(void) +-{ +- kgdb_register_io_module(&kgdb_pdc_io_ops); +- return 0; +-} +-early_initcall(kgdb_pdc_init); +diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c +index 7d0989f523d03..cf3bf82323746 100644 +--- a/arch/parisc/kernel/pdc_cons.c ++++ b/arch/parisc/kernel/pdc_cons.c +@@ -12,37 +12,27 @@ + #include /* for PAGE0 */ + #include /* for iodc_call() proto and friends */ + +-static DEFINE_SPINLOCK(pdc_console_lock); +- + static void pdc_console_write(struct console *co, const char *s, unsigned count) + { + int i = 0; +- unsigned long flags; + +- spin_lock_irqsave(&pdc_console_lock, flags); + do { + i += pdc_iodc_print(s + i, count - i); + } while (i < count); +- spin_unlock_irqrestore(&pdc_console_lock, flags); + } + + #ifdef CONFIG_KGDB + static int kgdb_pdc_read_char(void) + { +- int c; +- unsigned long flags; +- +- spin_lock_irqsave(&pdc_console_lock, flags); +- c = pdc_iodc_getc(); +- spin_unlock_irqrestore(&pdc_console_lock, flags); ++ int c = pdc_iodc_getc(); + + return (c <= 0) ? 
NO_POLL_CHAR : c; + } + + static void kgdb_pdc_write_char(u8 chr) + { +- if (PAGE0->mem_cons.cl_class != CL_DUPLEX) +- pdc_console_write(NULL, &chr, 1); ++ /* no need to print char as it's shown on standard console */ ++ /* pdc_iodc_print(&chr, 1); */ + } + + static struct kgdb_io kgdb_pdc_io_ops = { +diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile +index 85b1c6d261d12..4459a48d23033 100644 +--- a/arch/parisc/kernel/vdso32/Makefile ++++ b/arch/parisc/kernel/vdso32/Makefile +@@ -26,7 +26,7 @@ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE + + # Force dependency (incbin is bad) + # link rule for the .so file, .lds has to be first +-$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) ++$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE + $(call if_changed,vdso32ld) + + # assembly rules for the .S files +@@ -38,7 +38,7 @@ $(obj-cvdso32): %.o: %.c FORCE + + # actual build commands + quiet_cmd_vdso32ld = VDSO32L $@ +- cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ ++ cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ + quiet_cmd_vdso32as = VDSO32A $@ + cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< + quiet_cmd_vdso32cc = VDSO32C $@ +diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile +index a30f5ec5eb4bf..f3d6045793f4c 100644 +--- a/arch/parisc/kernel/vdso64/Makefile ++++ b/arch/parisc/kernel/vdso64/Makefile +@@ -26,7 +26,7 @@ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE + + # Force dependency (incbin is bad) + # link rule for the .so file, .lds has to be first +-$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) ++$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE + $(call if_changed,vdso64ld) + + # assembly rules for the .S files +@@ -35,7 +35,7 @@ $(obj-vdso64): %.o: %.S FORCE + + # actual build commands + quiet_cmd_vdso64ld = VDSO64L $@ +- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ ++ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ + quiet_cmd_vdso64as = VDSO64A $@ + cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< + +diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h +index 3cee7115441b4..e3d1f377bc5b5 100644 +--- a/arch/powerpc/include/asm/ftrace.h ++++ b/arch/powerpc/include/asm/ftrace.h +@@ -64,17 +64,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + * those. 
+ */ + #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +-#ifdef CONFIG_PPC64_ELF_ABI_V1 +-static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) +-{ +- /* We need to skip past the initial dot, and the __se_sys alias */ +- return !strcmp(sym + 1, name) || +- (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) || +- (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) || +- (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) || +- (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4)); +-} +-#else + static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) + { + return !strcmp(sym, name) || +@@ -83,7 +72,6 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name + (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || + (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); + } +-#endif /* CONFIG_PPC64_ELF_ABI_V1 */ + #endif /* CONFIG_FTRACE_SYSCALLS */ + + #if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER) +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig +index 593cf09264d80..8e5fd56820189 100644 +--- a/arch/riscv/Kconfig ++++ b/arch/riscv/Kconfig +@@ -502,7 +502,7 @@ config KEXEC_FILE + select KEXEC_CORE + select KEXEC_ELF + select HAVE_IMA_KEXEC if IMA +- depends on 64BIT ++ depends on 64BIT && MMU + help + This is new version of kexec system call. This system call is + file based and takes file descriptors as system call argument +diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h +index eee260e8ab308..2b56769cb530c 100644 +--- a/arch/riscv/include/asm/kexec.h ++++ b/arch/riscv/include/asm/kexec.h +@@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs, + #define ARCH_HAS_KIMAGE_ARCH + + struct kimage_arch { ++ void *fdt; /* For CONFIG_KEXEC_FILE */ + unsigned long fdt_addr; + }; + +@@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); + #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add ++ ++struct kimage; ++int arch_kimage_file_post_load_cleanup(struct kimage *image); ++#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup + #endif + + #endif +diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h +index 0099dc1161683..5ff1f19fd45c2 100644 +--- a/arch/riscv/include/asm/mmu.h ++++ b/arch/riscv/include/asm/mmu.h +@@ -19,6 +19,8 @@ typedef struct { + #ifdef CONFIG_SMP + /* A local icache flush is needed before user execution can resume. */ + cpumask_t icache_stale_mask; ++ /* A local tlb flush is needed before user execution can resume. */ ++ cpumask_t tlb_stale_mask; + #endif + } mm_context_t; + +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h +index 92ec2d9d7273f..ec6fb83349ced 100644 +--- a/arch/riscv/include/asm/pgtable.h ++++ b/arch/riscv/include/asm/pgtable.h +@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, + * Relying on flush_tlb_fix_spurious_fault would suffice, but + * the extra traps reduce performance. So, eagerly SFENCE.VMA. 
+ */ +- local_flush_tlb_page(address); ++ flush_tlb_page(vma, address); + } + + static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, +diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h +index 801019381dea3..907b9efd39a87 100644 +--- a/arch/riscv/include/asm/tlbflush.h ++++ b/arch/riscv/include/asm/tlbflush.h +@@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr) + { + ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); + } ++ ++static inline void local_flush_tlb_all_asid(unsigned long asid) ++{ ++ __asm__ __volatile__ ("sfence.vma x0, %0" ++ : ++ : "r" (asid) ++ : "memory"); ++} ++ ++static inline void local_flush_tlb_page_asid(unsigned long addr, ++ unsigned long asid) ++{ ++ __asm__ __volatile__ ("sfence.vma %0, %1" ++ : ++ : "r" (addr), "r" (asid) ++ : "memory"); ++} ++ + #else /* CONFIG_MMU */ + #define local_flush_tlb_all() do { } while (0) + #define local_flush_tlb_page(addr) do { } while (0) +diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c +index 0cb94992c15b3..5372b708fae21 100644 +--- a/arch/riscv/kernel/elf_kexec.c ++++ b/arch/riscv/kernel/elf_kexec.c +@@ -21,6 +21,18 @@ + #include + #include + ++int arch_kimage_file_post_load_cleanup(struct kimage *image) ++{ ++ kvfree(image->arch.fdt); ++ image->arch.fdt = NULL; ++ ++ vfree(image->elf_headers); ++ image->elf_headers = NULL; ++ image->elf_headers_sz = 0; ++ ++ return kexec_image_post_load_cleanup_default(image); ++} ++ + static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, + struct kexec_elf_info *elf_info, unsigned long old_pbase, + unsigned long new_pbase) +@@ -298,6 +310,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, + pr_err("Error add DTB kbuf ret=%d\n", ret); + goto out_free_fdt; + } ++ /* Cache the fdt buffer address for memory cleanup */ ++ image->arch.fdt = fdt; + pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); + goto out; + +diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c +index 08d11a53f39e7..bcfe9eb55f80f 100644 +--- a/arch/riscv/kernel/stacktrace.c ++++ b/arch/riscv/kernel/stacktrace.c +@@ -58,7 +58,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + } else { + fp = frame->fp; + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, +- (unsigned long *)(fp - 8)); ++ &frame->ra); + } + + } +diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c +index 7acbfbd14557e..80ce9caba8d22 100644 +--- a/arch/riscv/mm/context.c ++++ b/arch/riscv/mm/context.c +@@ -196,6 +196,16 @@ switch_mm_fast: + + if (need_flush_tlb) + local_flush_tlb_all(); ++#ifdef CONFIG_SMP ++ else { ++ cpumask_t *mask = &mm->context.tlb_stale_mask; ++ ++ if (cpumask_test_cpu(cpu, mask)) { ++ cpumask_clear_cpu(cpu, mask); ++ local_flush_tlb_all_asid(cntx & asid_mask); ++ } ++ } ++#endif + } + + static void set_mm_noasid(struct mm_struct *mm) +diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c +index 37ed760d007c3..ce7dfc81bb3fe 100644 +--- a/arch/riscv/mm/tlbflush.c ++++ b/arch/riscv/mm/tlbflush.c +@@ -5,23 +5,7 @@ + #include + #include + #include +- +-static inline void local_flush_tlb_all_asid(unsigned long asid) +-{ +- __asm__ __volatile__ ("sfence.vma x0, %0" +- : +- : "r" (asid) +- : "memory"); +-} +- +-static inline void local_flush_tlb_page_asid(unsigned long addr, +- unsigned long asid) +-{ +- __asm__ __volatile__ ("sfence.vma %0, %1" +- : +- : "r" (addr), "r" (asid) +- : "memory"); 
+-} ++#include + + void flush_tlb_all(void) + { +@@ -31,6 +15,7 @@ void flush_tlb_all(void) + static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, + unsigned long size, unsigned long stride) + { ++ struct cpumask *pmask = &mm->context.tlb_stale_mask; + struct cpumask *cmask = mm_cpumask(mm); + unsigned int cpuid; + bool broadcast; +@@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, + if (static_branch_unlikely(&use_asid_allocator)) { + unsigned long asid = atomic_long_read(&mm->context.id); + ++ /* ++ * TLB will be immediately flushed on harts concurrently ++ * executing this MM context. TLB flush on other harts ++ * is deferred until this MM context migrates there. ++ */ ++ cpumask_setall(pmask); ++ cpumask_clear_cpu(cpuid, pmask); ++ cpumask_andnot(pmask, pmask, cmask); ++ + if (broadcast) { + sbi_remote_sfence_vma_asid(cmask, start, size, asid); + } else if (size <= stride) { +diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c +index acb55b302b14c..3ac220dafec4a 100644 +--- a/arch/um/drivers/virt-pci.c ++++ b/arch/um/drivers/virt-pci.c +@@ -97,7 +97,8 @@ static int um_pci_send_cmd(struct um_pci_device *dev, + } + + buf = get_cpu_var(um_pci_msg_bufs); +- memcpy(buf, cmd, cmd_size); ++ if (buf) ++ memcpy(buf, cmd, cmd_size); + + if (posted) { + u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC); +@@ -182,6 +183,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, + struct um_pci_message_buffer *buf; + u8 *data; + unsigned long ret = ULONG_MAX; ++ size_t bytes = sizeof(buf->data); + + if (!dev) + return ULONG_MAX; +@@ -189,7 +191,8 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, + buf = get_cpu_var(um_pci_msg_bufs); + data = buf->data; + +- memset(buf->data, 0xff, sizeof(buf->data)); ++ if (buf) ++ memset(data, 0xff, bytes); + + switch (size) { + case 1: +@@ -204,7 +207,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, + goto out; + } + +- if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, 8)) ++ if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes)) + goto out; + + switch (size) { +diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h +index 2adeaf4de4df6..b363fddc2a89e 100644 +--- a/arch/x86/events/intel/uncore.h ++++ b/arch/x86/events/intel/uncore.h +@@ -2,6 +2,7 @@ + #include + #include + #include ++#include + #include + + #include +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c +index fcd95e93f479a..8f371f3cbbd24 100644 +--- a/arch/x86/events/intel/uncore_snbep.c ++++ b/arch/x86/events/intel/uncore_snbep.c +@@ -3804,6 +3804,21 @@ static const struct attribute_group *skx_iio_attr_update[] = { + NULL, + }; + ++static void pmu_clear_mapping_attr(const struct attribute_group **groups, ++ struct attribute_group *ag) ++{ ++ int i; ++ ++ for (i = 0; groups[i]; i++) { ++ if (groups[i] == ag) { ++ for (i++; groups[i]; i++) ++ groups[i - 1] = groups[i]; ++ groups[i - 1] = NULL; ++ break; ++ } ++ } ++} ++ + static int + pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) + { +@@ -3852,7 +3867,7 @@ clear_attrs: + clear_topology: + kfree(type->topology); + clear_attr_update: +- type->attr_update = NULL; ++ pmu_clear_mapping_attr(type->attr_update, ag); + return ret; + } + +@@ -5144,6 +5159,11 @@ static int icx_iio_get_topology(struct intel_uncore_type *type) + + static int icx_iio_set_mapping(struct 
intel_uncore_type *type) + { ++ /* Detect ICX-D system. This case is not supported */ ++ if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { ++ pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group); ++ return -EPERM; ++ } + return pmu_iio_set_mapping(type, &icx_iio_mapping_group); + } + +diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c +index 1c87501e0fa3d..10fb5b5c9efa4 100644 +--- a/arch/x86/kernel/cpu/mce/amd.c ++++ b/arch/x86/kernel/cpu/mce/amd.c +@@ -788,6 +788,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) + return status & MCI_STATUS_DEFERRED; + } + ++static bool _log_error_deferred(unsigned int bank, u32 misc) ++{ ++ if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), ++ mca_msr_reg(bank, MCA_ADDR), misc)) ++ return false; ++ ++ /* ++ * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. ++ * Return true here to avoid accessing these registers. ++ */ ++ if (!mce_flags.smca) ++ return true; ++ ++ /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ ++ wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); ++ return true; ++} ++ + /* + * We have three scenarios for checking for Deferred errors: + * +@@ -799,19 +817,8 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) + */ + static void log_error_deferred(unsigned int bank) + { +- bool defrd; +- +- defrd = _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), +- mca_msr_reg(bank, MCA_ADDR), 0); +- +- if (!mce_flags.smca) +- return; +- +- /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */ +- if (defrd) { +- wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); ++ if (_log_error_deferred(bank, 0)) + return; +- } + + /* + * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check +@@ -832,7 +839,7 @@ static void amd_deferred_error_interrupt(void) + + static void log_error_thresholding(unsigned int bank, u64 misc) + { +- _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), mca_msr_reg(bank, MCA_ADDR), misc); ++ _log_error_deferred(bank, misc); + } + + static void log_and_reset_block(struct threshold_block *block) +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c +index 1fcbd671f1dff..048e38ec99e71 100644 +--- a/arch/x86/kernel/cpu/microcode/intel.c ++++ b/arch/x86/kernel/cpu/microcode/intel.c +@@ -621,7 +621,6 @@ void load_ucode_intel_ap(void) + else + iup = &intel_ucode_patch; + +-reget: + if (!*iup) { + patch = __load_ucode_intel(&uci); + if (!patch) +@@ -632,12 +631,7 @@ reget: + + uci.mc = *iup; + +- if (apply_microcode_early(&uci, true)) { +- /* Mixed-silicon system? Try to refetch the proper patch: */ +- *iup = NULL; +- +- goto reget; +- } ++ apply_microcode_early(&uci, true); + } + + static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) +diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c +index 59e543b95a3c6..c2dde46a538e7 100644 +--- a/arch/x86/kernel/fpu/xstate.c ++++ b/arch/x86/kernel/fpu/xstate.c +@@ -440,8 +440,8 @@ static void __init __xstate_dump_leaves(void) + } + } + +-#define XSTATE_WARN_ON(x) do { \ +- if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) { \ ++#define XSTATE_WARN_ON(x, fmt, ...) 
do { \ ++ if (WARN_ONCE(x, "XSAVE consistency problem: " fmt, ##__VA_ARGS__)) { \ + __xstate_dump_leaves(); \ + } \ + } while (0) +@@ -554,8 +554,7 @@ static bool __init check_xstate_against_struct(int nr) + (nr >= XFEATURE_MAX) || + (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) || + ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_RSRVD_COMP_16))) { +- WARN_ONCE(1, "no structure for xstate: %d\n", nr); +- XSTATE_WARN_ON(1); ++ XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr); + return false; + } + return true; +@@ -598,12 +597,13 @@ static bool __init paranoid_xstate_size_valid(unsigned int kernel_size) + * XSAVES. + */ + if (!xsaves && xfeature_is_supervisor(i)) { +- XSTATE_WARN_ON(1); ++ XSTATE_WARN_ON(1, "Got supervisor feature %d, but XSAVES not advertised\n", i); + return false; + } + } + size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted); +- XSTATE_WARN_ON(size != kernel_size); ++ XSTATE_WARN_ON(size != kernel_size, ++ "size %u != kernel_size %u\n", size, kernel_size); + return size == kernel_size; + } + +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c +index bd165004776d9..e07234ec7e237 100644 +--- a/arch/x86/kernel/ftrace.c ++++ b/arch/x86/kernel/ftrace.c +@@ -217,7 +217,9 @@ void ftrace_replace_code(int enable) + + ret = ftrace_verify_code(rec->ip, old); + if (ret) { ++ ftrace_expected = old; + ftrace_bug(ret, rec); ++ ftrace_expected = NULL; + return; + } + } +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index eb8bc82846b99..5be7f23099e1f 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr) + if (ret < 0) + return 0; + ++#ifdef CONFIG_KGDB + /* +- * Another debugging subsystem might insert this breakpoint. +- * In that case, we can't recover it. ++ * If there is a dynamically installed kgdb sw breakpoint, ++ * this function should not be probed. + */ +- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) ++ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && ++ kgdb_has_hit_break(addr)) + return 0; ++#endif + addr += insn.length; + } + +diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c +index e6b8c5362b945..e57e07b0edb64 100644 +--- a/arch/x86/kernel/kprobes/opt.c ++++ b/arch/x86/kernel/kprobes/opt.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn) + return ret; + } + +-static bool is_padding_int3(unsigned long addr, unsigned long eaddr) +-{ +- unsigned char ops; +- +- for (; addr < eaddr; addr++) { +- if (get_kernel_nofault(ops, (void *)addr) < 0 || +- ops != INT3_INSN_OPCODE) +- return false; +- } +- +- return true; +-} +- + /* Decode whole function to ensure any instructions don't jump into target */ + static int can_optimize(unsigned long paddr) + { +@@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr) + ret = insn_decode_kernel(&insn, (void *)recovered_insn); + if (ret < 0) + return 0; +- ++#ifdef CONFIG_KGDB + /* +- * In the case of detecting unknown breakpoint, this could be +- * a padding INT3 between functions. Let's check that all the +- * rest of the bytes are also INT3. ++ * If there is a dynamically installed kgdb sw breakpoint, ++ * this function should not be probed. 
+ */ +- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) +- return is_padding_int3(addr, paddr - offset + size) ? 1 : 0; +- ++ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && ++ kgdb_has_hit_break(addr)) ++ return 0; ++#endif + /* Recover address */ + insn.kaddr = (void *)addr; + insn.next_byte = (void *)(addr + insn.length); +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index d7639d126e6c7..bf5ce862c4daf 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -2722,8 +2722,6 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, + icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); + __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); + } +- } else { +- kvm_lapic_xapic_id_updated(vcpu->arch.apic); + } + + return 0; +@@ -2759,6 +2757,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) + } + memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); + ++ if (!apic_x2apic_mode(apic)) ++ kvm_lapic_xapic_id_updated(apic); ++ + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); + kvm_recalculate_apic_map(vcpu->kvm); + kvm_apic_set_version(vcpu); +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c +index 5b0d4859e4b78..10c63b1bf92fa 100644 +--- a/arch/x86/kvm/vmx/nested.c ++++ b/arch/x86/kvm/vmx/nested.c +@@ -5100,24 +5100,35 @@ static int handle_vmxon(struct kvm_vcpu *vcpu) + | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; + + /* +- * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks +- * that have higher priority than VM-Exit (see Intel SDM's pseudocode +- * for VMXON), as KVM must load valid CR0/CR4 values into hardware while +- * running the guest, i.e. KVM needs to check the _guest_ values. ++ * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter ++ * the guest and so cannot rely on hardware to perform the check, ++ * which has higher priority than VM-Exit (see Intel SDM's pseudocode ++ * for VMXON). + * +- * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and +- * !COMPATIBILITY modes. KVM may run the guest in VM86 to emulate Real +- * Mode, but KVM will never take the guest out of those modes. ++ * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 ++ * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't ++ * force any of the relevant guest state. For a restricted guest, KVM ++ * does force CR0.PE=1, but only to also force VM86 in order to emulate ++ * Real Mode, and so there's no need to check CR0.PE manually. + */ +- if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || +- !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { ++ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } + + /* +- * CPL=0 and all other checks that are lower priority than VM-Exit must +- * be checked manually. ++ * The CPL is checked for "not in VMX operation" and for "in VMX root", ++ * and has higher priority than the VM-Fail due to being post-VMXON, ++ * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, ++ * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits ++ * from L2 to L1, i.e. there's no need to check for the vCPU being in ++ * VMX non-root. ++ * ++ * Forwarding the VM-Exit unconditionally, i.e. without performing the ++ * #UD checks (see above), is functionally ok because KVM doesn't allow ++ * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's ++ * CR0 or CR4, i.e. 
it's L2's responsibility to emulate #UDs that are ++ * missed by hardware due to shadowing CR0 and/or CR4. + */ + if (vmx_get_cpl(vcpu)) { + kvm_inject_gp(vcpu, 0); +@@ -5127,6 +5138,17 @@ static int handle_vmxon(struct kvm_vcpu *vcpu) + if (vmx->nested.vmxon) + return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); + ++ /* ++ * Invalid CR0/CR4 generates #GP. These checks are performed if and ++ * only if the vCPU isn't already in VMX operation, i.e. effectively ++ * have lower priority than the VM-Fail above. ++ */ ++ if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || ++ !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { ++ kvm_inject_gp(vcpu, 0); ++ return 1; ++ } ++ + if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) + != VMXON_NEEDED_FEATURES) { + kvm_inject_gp(vcpu, 0); +@@ -6808,7 +6830,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) + SECONDARY_EXEC_ENABLE_INVPCID | + SECONDARY_EXEC_RDSEED_EXITING | + SECONDARY_EXEC_XSAVES | +- SECONDARY_EXEC_TSC_SCALING; ++ SECONDARY_EXEC_TSC_SCALING | ++ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; + + /* + * We can emulate "VMCS shadowing," even if the hardware +diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c +index 8f95c7c014335..b12da2a6dec95 100644 +--- a/arch/x86/kvm/vmx/sgx.c ++++ b/arch/x86/kvm/vmx/sgx.c +@@ -182,8 +182,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, + /* Enforce CPUID restriction on max enclave size. */ + max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 : + sgx_12_0->edx; +- if (size >= BIT_ULL(max_size_log2)) ++ if (size >= BIT_ULL(max_size_log2)) { + kvm_inject_gp(vcpu, 0); ++ return 1; ++ } + + /* + * sgx_virt_ecreate() returns: +diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c +index b0bc8897c924f..2a31b1ab0c9f2 100644 +--- a/arch/xtensa/kernel/xtensa_ksyms.c ++++ b/arch/xtensa/kernel/xtensa_ksyms.c +@@ -62,6 +62,7 @@ extern int __modsi3(int, int); + extern int __mulsi3(int, int); + extern unsigned int __udivsi3(unsigned int, unsigned int); + extern unsigned int __umodsi3(unsigned int, unsigned int); ++extern unsigned long long __umulsidi3(unsigned int, unsigned int); + + EXPORT_SYMBOL(__ashldi3); + EXPORT_SYMBOL(__ashrdi3); +@@ -71,6 +72,7 @@ EXPORT_SYMBOL(__modsi3); + EXPORT_SYMBOL(__mulsi3); + EXPORT_SYMBOL(__udivsi3); + EXPORT_SYMBOL(__umodsi3); ++EXPORT_SYMBOL(__umulsidi3); + + unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) + { +diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile +index d4e9c397e3fde..7ecef0519a27c 100644 +--- a/arch/xtensa/lib/Makefile ++++ b/arch/xtensa/lib/Makefile +@@ -5,7 +5,7 @@ + + lib-y += memcopy.o memset.o checksum.o \ + ashldi3.o ashrdi3.o lshrdi3.o \ +- divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \ ++ divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \ + usercopy.o strncpy_user.o strnlen_user.o + lib-$(CONFIG_PCI) += pci-auto.o + lib-$(CONFIG_KCSAN) += kcsan-stubs.o +diff --git a/arch/xtensa/lib/umulsidi3.S b/arch/xtensa/lib/umulsidi3.S +new file mode 100644 +index 0000000000000..1360816479427 +--- /dev/null ++++ b/arch/xtensa/lib/umulsidi3.S +@@ -0,0 +1,230 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ ++#include ++#include ++#include ++ ++#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16 ++#define XCHAL_NO_MUL 1 ++#endif ++ ++ENTRY(__umulsidi3) ++ ++#ifdef __XTENSA_CALL0_ABI__ ++ abi_entry(32) ++ s32i a12, sp, 16 ++ s32i a13, sp, 20 ++ s32i a14, sp, 24 ++ 
s32i a15, sp, 28 ++#elif XCHAL_NO_MUL ++ /* This is not really a leaf function; allocate enough stack space ++ to allow CALL12s to a helper function. */ ++ abi_entry(32) ++#else ++ abi_entry_default ++#endif ++ ++#ifdef __XTENSA_EB__ ++#define wh a2 ++#define wl a3 ++#else ++#define wh a3 ++#define wl a2 ++#endif /* __XTENSA_EB__ */ ++ ++ /* This code is taken from the mulsf3 routine in ieee754-sf.S. ++ See more comments there. */ ++ ++#if XCHAL_HAVE_MUL32_HIGH ++ mull a6, a2, a3 ++ muluh wh, a2, a3 ++ mov wl, a6 ++ ++#else /* ! MUL32_HIGH */ ++ ++#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL ++ /* a0 and a8 will be clobbered by calling the multiply function ++ but a8 is not used here and need not be saved. */ ++ s32i a0, sp, 0 ++#endif ++ ++#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 ++ ++#define a2h a4 ++#define a3h a5 ++ ++ /* Get the high halves of the inputs into registers. */ ++ srli a2h, a2, 16 ++ srli a3h, a3, 16 ++ ++#define a2l a2 ++#define a3l a3 ++ ++#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16 ++ /* Clear the high halves of the inputs. This does not matter ++ for MUL16 because the high bits are ignored. */ ++ extui a2, a2, 0, 16 ++ extui a3, a3, 0, 16 ++#endif ++#endif /* MUL16 || MUL32 */ ++ ++ ++#if XCHAL_HAVE_MUL16 ++ ++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ ++ mul16u dst, xreg ## xhalf, yreg ## yhalf ++ ++#elif XCHAL_HAVE_MUL32 ++ ++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ ++ mull dst, xreg ## xhalf, yreg ## yhalf ++ ++#elif XCHAL_HAVE_MAC16 ++ ++/* The preprocessor insists on inserting a space when concatenating after ++ a period in the definition of do_mul below. These macros are a workaround ++ using underscores instead of periods when doing the concatenation. */ ++#define umul_aa_ll umul.aa.ll ++#define umul_aa_lh umul.aa.lh ++#define umul_aa_hl umul.aa.hl ++#define umul_aa_hh umul.aa.hh ++ ++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ ++ umul_aa_ ## xhalf ## yhalf xreg, yreg; \ ++ rsr dst, ACCLO ++ ++#else /* no multiply hardware */ ++ ++#define set_arg_l(dst, src) \ ++ extui dst, src, 0, 16 ++#define set_arg_h(dst, src) \ ++ srli dst, src, 16 ++ ++#ifdef __XTENSA_CALL0_ABI__ ++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ ++ set_arg_ ## xhalf (a13, xreg); \ ++ set_arg_ ## yhalf (a14, yreg); \ ++ call0 .Lmul_mulsi3; \ ++ mov dst, a12 ++#else ++#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ ++ set_arg_ ## xhalf (a14, xreg); \ ++ set_arg_ ## yhalf (a15, yreg); \ ++ call12 .Lmul_mulsi3; \ ++ mov dst, a14 ++#endif /* __XTENSA_CALL0_ABI__ */ ++ ++#endif /* no multiply hardware */ ++ ++ /* Add pp1 and pp2 into a6 with carry-out in a9. */ ++ do_mul(a6, a2, l, a3, h) /* pp 1 */ ++ do_mul(a11, a2, h, a3, l) /* pp 2 */ ++ movi a9, 0 ++ add a6, a6, a11 ++ bgeu a6, a11, 1f ++ addi a9, a9, 1 ++1: ++ /* Shift the high half of a9/a6 into position in a9. Note that ++ this value can be safely incremented without any carry-outs. */ ++ ssai 16 ++ src a9, a9, a6 ++ ++ /* Compute the low word into a6. */ ++ do_mul(a11, a2, l, a3, l) /* pp 0 */ ++ sll a6, a6 ++ add a6, a6, a11 ++ bgeu a6, a11, 1f ++ addi a9, a9, 1 ++1: ++ /* Compute the high word into wh. */ ++ do_mul(wh, a2, h, a3, h) /* pp 3 */ ++ add wh, wh, a9 ++ mov wl, a6 ++ ++#endif /* !MUL32_HIGH */ ++ ++#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL ++ /* Restore the original return address. 
*/ ++ l32i a0, sp, 0 ++#endif ++#ifdef __XTENSA_CALL0_ABI__ ++ l32i a12, sp, 16 ++ l32i a13, sp, 20 ++ l32i a14, sp, 24 ++ l32i a15, sp, 28 ++ abi_ret(32) ++#else ++ abi_ret_default ++#endif ++ ++#if XCHAL_NO_MUL ++ ++ .macro do_addx2 dst, as, at, tmp ++#if XCHAL_HAVE_ADDX ++ addx2 \dst, \as, \at ++#else ++ slli \tmp, \as, 1 ++ add \dst, \tmp, \at ++#endif ++ .endm ++ ++ .macro do_addx4 dst, as, at, tmp ++#if XCHAL_HAVE_ADDX ++ addx4 \dst, \as, \at ++#else ++ slli \tmp, \as, 2 ++ add \dst, \tmp, \at ++#endif ++ .endm ++ ++ .macro do_addx8 dst, as, at, tmp ++#if XCHAL_HAVE_ADDX ++ addx8 \dst, \as, \at ++#else ++ slli \tmp, \as, 3 ++ add \dst, \tmp, \at ++#endif ++ .endm ++ ++ /* For Xtensa processors with no multiply hardware, this simplified ++ version of _mulsi3 is used for multiplying 16-bit chunks of ++ the floating-point mantissas. When using CALL0, this function ++ uses a custom ABI: the inputs are passed in a13 and a14, the ++ result is returned in a12, and a8 and a15 are clobbered. */ ++ .align 4 ++.Lmul_mulsi3: ++ abi_entry_default ++ ++ .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2 ++ movi \dst, 0 ++1: add \tmp1, \src2, \dst ++ extui \tmp2, \src1, 0, 1 ++ movnez \dst, \tmp1, \tmp2 ++ ++ do_addx2 \tmp1, \src2, \dst, \tmp1 ++ extui \tmp2, \src1, 1, 1 ++ movnez \dst, \tmp1, \tmp2 ++ ++ do_addx4 \tmp1, \src2, \dst, \tmp1 ++ extui \tmp2, \src1, 2, 1 ++ movnez \dst, \tmp1, \tmp2 ++ ++ do_addx8 \tmp1, \src2, \dst, \tmp1 ++ extui \tmp2, \src1, 3, 1 ++ movnez \dst, \tmp1, \tmp2 ++ ++ srli \src1, \src1, 4 ++ slli \src2, \src2, 4 ++ bnez \src1, 1b ++ .endm ++ ++#ifdef __XTENSA_CALL0_ABI__ ++ mul_mulsi3_body a12, a13, a14, a15, a8 ++#else ++ /* The result will be written into a2, so save that argument in a4. */ ++ mov a4, a2 ++ mul_mulsi3_body a2, a4, a3, a5, a6 ++#endif ++ abi_ret_default ++#endif /* XCHAL_NO_MUL */ ++ ++ENDPROC(__umulsidi3) +diff --git a/block/mq-deadline.c b/block/mq-deadline.c +index 5639921dfa922..6672f1bce3795 100644 +--- a/block/mq-deadline.c ++++ b/block/mq-deadline.c +@@ -130,6 +130,20 @@ static u8 dd_rq_ioclass(struct request *rq) + return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); + } + ++/* ++ * get the request before `rq' in sector-sorted order ++ */ ++static inline struct request * ++deadline_earlier_request(struct request *rq) ++{ ++ struct rb_node *node = rb_prev(&rq->rb_node); ++ ++ if (node) ++ return rb_entry_rq(node); ++ ++ return NULL; ++} ++ + /* + * get the request after `rq' in sector-sorted order + */ +@@ -277,6 +291,39 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio, + return 0; + } + ++/* ++ * Check if rq has a sequential request preceding it. ++ */ ++static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq) ++{ ++ struct request *prev = deadline_earlier_request(rq); ++ ++ if (!prev) ++ return false; ++ ++ return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq); ++} ++ ++/* ++ * Skip all write requests that are sequential from @rq, even if we cross ++ * a zone boundary. ++ */ ++static struct request *deadline_skip_seq_writes(struct deadline_data *dd, ++ struct request *rq) ++{ ++ sector_t pos = blk_rq_pos(rq); ++ sector_t skipped_sectors = 0; ++ ++ while (rq) { ++ if (blk_rq_pos(rq) != pos + skipped_sectors) ++ break; ++ skipped_sectors += blk_rq_sectors(rq); ++ rq = deadline_latter_request(rq); ++ } ++ ++ return rq; ++} ++ + /* + * For the specified data direction, return the next request to + * dispatch using arrival ordered lists. 
+@@ -297,11 +344,16 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, + + /* + * Look for a write request that can be dispatched, that is one with +- * an unlocked target zone. ++ * an unlocked target zone. For some HDDs, breaking a sequential ++ * write stream can lead to lower throughput, so make sure to preserve ++ * sequential write streams, even if that stream crosses into the next ++ * zones and these zones are unlocked. + */ + spin_lock_irqsave(&dd->zone_lock, flags); + list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) { +- if (blk_req_can_dispatch_to_zone(rq)) ++ if (blk_req_can_dispatch_to_zone(rq) && ++ (blk_queue_nonrot(rq->q) || ++ !deadline_is_seq_writes(dd, rq))) + goto out; + } + rq = NULL; +@@ -331,13 +383,19 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, + + /* + * Look for a write request that can be dispatched, that is one with +- * an unlocked target zone. ++ * an unlocked target zone. For some HDDs, breaking a sequential ++ * write stream can lead to lower throughput, so make sure to preserve ++ * sequential write streams, even if that stream crosses into the next ++ * zones and these zones are unlocked. + */ + spin_lock_irqsave(&dd->zone_lock, flags); + while (rq) { + if (blk_req_can_dispatch_to_zone(rq)) + break; +- rq = deadline_latter_request(rq); ++ if (blk_queue_nonrot(rq->q)) ++ rq = deadline_latter_request(rq); ++ else ++ rq = deadline_skip_seq_writes(dd, rq); + } + spin_unlock_irqrestore(&dd->zone_lock, flags); + +@@ -789,6 +847,18 @@ static void dd_prepare_request(struct request *rq) + rq->elv.priv[0] = NULL; + } + ++static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx) ++{ ++ struct deadline_data *dd = hctx->queue->elevator->elevator_data; ++ enum dd_prio p; ++ ++ for (p = 0; p <= DD_PRIO_MAX; p++) ++ if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE])) ++ return true; ++ ++ return false; ++} ++ + /* + * Callback from inside blk_mq_free_request(). + * +@@ -828,9 +898,10 @@ static void dd_finish_request(struct request *rq) + + spin_lock_irqsave(&dd->zone_lock, flags); + blk_req_zone_write_unlock(rq); +- if (!list_empty(&per_prio->fifo_list[DD_WRITE])) +- blk_mq_sched_mark_restart_hctx(rq->mq_hctx); + spin_unlock_irqrestore(&dd->zone_lock, flags); ++ ++ if (dd_has_write_work(rq->mq_hctx)) ++ blk_mq_sched_mark_restart_hctx(rq->mq_hctx); + } + } + +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c +index 13f10fbcd7f03..76b7e7f8894e7 100644 +--- a/drivers/acpi/video_detect.c ++++ b/drivers/acpi/video_detect.c +@@ -734,6 +734,16 @@ static bool google_cros_ec_present(void) + return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C"); + } + ++/* ++ * Windows 8 and newer no longer use the ACPI video interface, so it often ++ * does not work. So on win8+ systems prefer native brightness control. ++ * Chromebooks should always prefer native backlight control. ++ */ ++static bool prefer_native_over_acpi_video(void) ++{ ++ return acpi_osi_is_win8() || google_cros_ec_present(); ++} ++ + /* + * Determine which type of backlight interface to use on this system, + * First check cmdline, then dmi quirks, then do autodetect. +@@ -779,28 +789,16 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) + if (apple_gmux_backlight_present()) + return acpi_backlight_apple_gmux; + +- /* Chromebooks should always prefer native backlight control. 
*/ +- if (google_cros_ec_present() && native_available) +- return acpi_backlight_native; ++ /* Use ACPI video if available, except when native should be preferred. */ ++ if ((video_caps & ACPI_VIDEO_BACKLIGHT) && ++ !(native_available && prefer_native_over_acpi_video())) ++ return acpi_backlight_video; + +- /* On systems with ACPI video use either native or ACPI video. */ +- if (video_caps & ACPI_VIDEO_BACKLIGHT) { +- /* +- * Windows 8 and newer no longer use the ACPI video interface, +- * so it often does not work. If the ACPI tables are written +- * for win8 and native brightness ctl is available, use that. +- * +- * The native check deliberately is inside the if acpi-video +- * block on older devices without acpi-video support native +- * is usually not the best choice. +- */ +- if (acpi_osi_is_win8() && native_available) +- return acpi_backlight_native; +- else +- return acpi_backlight_video; +- } ++ /* Use native if available */ ++ if (native_available) ++ return acpi_backlight_native; + +- /* No ACPI video (old hw), use vendor specific fw methods. */ ++ /* No ACPI video/native (old hw), use vendor specific fw methods. */ + return acpi_backlight_vendor; + } + +@@ -812,18 +810,6 @@ EXPORT_SYMBOL(acpi_video_get_backlight_type); + + bool acpi_video_backlight_use_native(void) + { +- /* +- * Call __acpi_video_get_backlight_type() to let it know that +- * a native backlight is available. +- */ +- __acpi_video_get_backlight_type(true); +- +- /* +- * For now just always return true. There is a whole bunch of laptop +- * models where (video_caps & ACPI_VIDEO_BACKLIGHT) is false causing +- * __acpi_video_get_backlight_type() to return vendor, while these +- * models only have a native backlight control. +- */ +- return true; ++ return __acpi_video_get_backlight_type(true) == acpi_backlight_native; + } + EXPORT_SYMBOL(acpi_video_backlight_use_native); +diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h +index 7add8e79912b1..ff8e6ae1c6362 100644 +--- a/drivers/ata/ahci.h ++++ b/drivers/ata/ahci.h +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + /* Enclosure Management Control */ + #define EM_CTRL_MSG_TYPE 0x000f0000 +@@ -53,12 +54,12 @@ enum { + AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + + AHCI_CMD_TBL_AR_SZ + + (AHCI_RX_FIS_SZ * 16), +- AHCI_IRQ_ON_SG = (1 << 31), +- AHCI_CMD_ATAPI = (1 << 5), +- AHCI_CMD_WRITE = (1 << 6), +- AHCI_CMD_PREFETCH = (1 << 7), +- AHCI_CMD_RESET = (1 << 8), +- AHCI_CMD_CLR_BUSY = (1 << 10), ++ AHCI_IRQ_ON_SG = BIT(31), ++ AHCI_CMD_ATAPI = BIT(5), ++ AHCI_CMD_WRITE = BIT(6), ++ AHCI_CMD_PREFETCH = BIT(7), ++ AHCI_CMD_RESET = BIT(8), ++ AHCI_CMD_CLR_BUSY = BIT(10), + + RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ + RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ +@@ -76,37 +77,37 @@ enum { + HOST_CAP2 = 0x24, /* host capabilities, extended */ + + /* HOST_CTL bits */ +- HOST_RESET = (1 << 0), /* reset controller; self-clear */ +- HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ +- HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ +- HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ ++ HOST_RESET = BIT(0), /* reset controller; self-clear */ ++ HOST_IRQ_EN = BIT(1), /* global IRQ enable */ ++ HOST_MRSM = BIT(2), /* MSI Revert to Single Message */ ++ HOST_AHCI_EN = BIT(31), /* AHCI enabled */ + + /* HOST_CAP bits */ +- HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ +- HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ +- HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ +- HOST_CAP_PART = (1 << 
13), /* Partial state capable */ +- HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ +- HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ +- HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ +- HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ +- HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ +- HOST_CAP_CLO = (1 << 24), /* Command List Override support */ +- HOST_CAP_LED = (1 << 25), /* Supports activity LED */ +- HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ +- HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ +- HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ +- HOST_CAP_SNTF = (1 << 29), /* SNotification register */ +- HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ +- HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ ++ HOST_CAP_SXS = BIT(5), /* Supports External SATA */ ++ HOST_CAP_EMS = BIT(6), /* Enclosure Management support */ ++ HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */ ++ HOST_CAP_PART = BIT(13), /* Partial state capable */ ++ HOST_CAP_SSC = BIT(14), /* Slumber state capable */ ++ HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */ ++ HOST_CAP_FBS = BIT(16), /* FIS-based switching support */ ++ HOST_CAP_PMP = BIT(17), /* Port Multiplier support */ ++ HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */ ++ HOST_CAP_CLO = BIT(24), /* Command List Override support */ ++ HOST_CAP_LED = BIT(25), /* Supports activity LED */ ++ HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */ ++ HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */ ++ HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */ ++ HOST_CAP_SNTF = BIT(29), /* SNotification register */ ++ HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */ ++ HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */ + + /* HOST_CAP2 bits */ +- HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ +- HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ +- HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ +- HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ +- HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ +- HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ ++ HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */ ++ HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */ ++ HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */ ++ HOST_CAP2_SDS = BIT(3), /* Support device sleep */ ++ HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */ ++ HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */ + + /* registers for each SATA port */ + PORT_LST_ADDR = 0x00, /* command list DMA addr */ +@@ -128,24 +129,24 @@ enum { + PORT_DEVSLP = 0x44, /* device sleep */ + + /* PORT_IRQ_{STAT,MASK} bits */ +- PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ +- PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ +- PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ +- PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ +- PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ +- PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ +- PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ +- PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ +- +- PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ +- PORT_IRQ_DMPS = (1 << 7), /* mechanical presence status */ +- PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ +- PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ +- PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ +- 
PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ +- PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ +- PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ +- PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ ++ PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */ ++ PORT_IRQ_TF_ERR = BIT(30), /* task file error */ ++ PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */ ++ PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */ ++ PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */ ++ PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */ ++ PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */ ++ PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */ ++ ++ PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */ ++ PORT_IRQ_DMPS = BIT(7), /* mechanical presence status */ ++ PORT_IRQ_CONNECT = BIT(6), /* port connect change status */ ++ PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */ ++ PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */ ++ PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */ ++ PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */ ++ PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */ ++ PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */ + + PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | + PORT_IRQ_IF_ERR | +@@ -161,27 +162,27 @@ enum { + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, + + /* PORT_CMD bits */ +- PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ +- PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ +- PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ +- PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ +- PORT_CMD_ESP = (1 << 21), /* External Sata Port */ +- PORT_CMD_CPD = (1 << 20), /* Cold Presence Detection */ +- PORT_CMD_MPSP = (1 << 19), /* Mechanical Presence Switch */ +- PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */ +- PORT_CMD_PMP = (1 << 17), /* PMP attached */ +- PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ +- PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ +- PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ +- PORT_CMD_CLO = (1 << 3), /* Command list override */ +- PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ +- PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ +- PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ +- +- PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ +- PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ +- PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ +- PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ ++ PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */ ++ PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */ ++ PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */ ++ PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */ ++ PORT_CMD_ESP = BIT(21), /* External Sata Port */ ++ PORT_CMD_CPD = BIT(20), /* Cold Presence Detection */ ++ PORT_CMD_MPSP = BIT(19), /* Mechanical Presence Switch */ ++ PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */ ++ PORT_CMD_PMP = BIT(17), /* PMP attached */ ++ PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */ ++ PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */ ++ PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */ ++ PORT_CMD_CLO = BIT(3), /* Command list override */ ++ PORT_CMD_POWER_ON = BIT(2), /* Power up device */ ++ PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */ ++ PORT_CMD_START = BIT(0), /* Enable port DMA engine */ ++ ++ PORT_CMD_ICC_MASK = (0xfu << 28), /* 
i/f ICC state mask */ ++ PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put i/f in active state */ ++ PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */ ++ PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */ + + /* PORT_CMD capabilities mask */ + PORT_CMD_CAP = PORT_CMD_HPCP | PORT_CMD_MPSP | +@@ -192,9 +193,9 @@ enum { + PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ + PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ + PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ +- PORT_FBS_SDE = (1 << 2), /* FBS single device error */ +- PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ +- PORT_FBS_EN = (1 << 0), /* Enable FBS */ ++ PORT_FBS_SDE = BIT(2), /* FBS single device error */ ++ PORT_FBS_DEC = BIT(1), /* FBS device error clear */ ++ PORT_FBS_EN = BIT(0), /* Enable FBS */ + + /* PORT_DEVSLP bits */ + PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ +@@ -202,50 +203,50 @@ enum { + PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ + PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ + PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ +- PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ +- PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ ++ PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */ ++ PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */ + + /* hpriv->flags bits */ + + #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) + +- AHCI_HFLAG_NO_NCQ = (1 << 0), +- AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ +- AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ +- AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ +- AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ +- AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ +- AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ +- AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ +- AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ +- AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ +- AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as +- link offline */ +- AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ +- AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ +- AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ +- AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on +- port start (wait until +- error-handling stage) */ +- AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ +- AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ ++ AHCI_HFLAG_NO_NCQ = BIT(0), ++ AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */ ++ AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */ ++ AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */ ++ AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */ ++ AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */ ++ AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */ ++ AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */ ++ AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */ ++ AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */ ++ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as ++ link offline */ ++ AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */ ++ AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */ ++ AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */ ++ AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on ++ port start (wait until ++ error-handling stage) */ ++ AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */ ++ AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */ + + #ifdef CONFIG_PCI_MSI +- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ 
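The surrounding ahci.h hunks mechanically convert open-coded (1 << n) masks to the kernel's BIT() helper; the practical difference shows up at bit 31, where shifting a signed int is undefined behaviour while BIT() shifts an unsigned long. A stand-alone sketch (BIT() is redefined locally here to mirror the kernel definition, not included from kernel headers):

#include <stdio.h>

/* Local stand-in mirroring the kernel's BIT(): an unsigned long shift. */
#define BIT(nr) (1UL << (nr))

int main(void)
{
	/* (1 << 31) shifts into the sign bit of a 32-bit int, which is
	 * undefined in C; BIT(31) stays well defined and does not
	 * sign-extend when widened to a larger type. */
	unsigned long host_ahci_en = BIT(31);

	printf("0x%lx\n", host_ahci_en);	/* 0x80000000 */
	return 0;
}
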
++ AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */ + #else + /* compile out MSI infrastructure */ + AHCI_HFLAG_MULTI_MSI = 0, + #endif +- AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ +- AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */ +- AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read +- only registers */ +- AHCI_HFLAG_USE_LPM_POLICY = (1 << 25), /* chipset that should use +- SATA_MOBILE_LPM_POLICY +- as default lpm_policy */ +- AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during +- suspend/resume */ +- AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ ++ AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before DMA stop */ ++ AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */ ++ AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read ++ only registers */ ++ AHCI_HFLAG_USE_LPM_POLICY = BIT(25), /* chipset that should use ++ SATA_MOBILE_LPM_POLICY ++ as default lpm_policy */ ++ AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during ++ suspend/resume */ ++ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */ + + /* ap->flags bits */ + +@@ -261,22 +262,22 @@ enum { + EM_MAX_RETRY = 5, + + /* em_ctl bits */ +- EM_CTL_RST = (1 << 9), /* Reset */ +- EM_CTL_TM = (1 << 8), /* Transmit Message */ +- EM_CTL_MR = (1 << 0), /* Message Received */ +- EM_CTL_ALHD = (1 << 26), /* Activity LED */ +- EM_CTL_XMT = (1 << 25), /* Transmit Only */ +- EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ +- EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ +- EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ +- EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ +- EM_CTL_LED = (1 << 16), /* LED messages supported */ ++ EM_CTL_RST = BIT(9), /* Reset */ ++ EM_CTL_TM = BIT(8), /* Transmit Message */ ++ EM_CTL_MR = BIT(0), /* Message Received */ ++ EM_CTL_ALHD = BIT(26), /* Activity LED */ ++ EM_CTL_XMT = BIT(25), /* Transmit Only */ ++ EM_CTL_SMB = BIT(24), /* Single Message Buffer */ ++ EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */ ++ EM_CTL_SES = BIT(18), /* SES-2 messages supported */ ++ EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */ ++ EM_CTL_LED = BIT(16), /* LED messages supported */ + + /* em message type */ +- EM_MSG_TYPE_LED = (1 << 0), /* LED */ +- EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ +- EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ +- EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ ++ EM_MSG_TYPE_LED = BIT(0), /* LED */ ++ EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */ ++ EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */ ++ EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */ + }; + + struct ahci_cmd_hdr { +diff --git a/drivers/base/dd.c b/drivers/base/dd.c +index 3dda62503102f..9ae2b5c4fc496 100644 +--- a/drivers/base/dd.c ++++ b/drivers/base/dd.c +@@ -1162,7 +1162,11 @@ static int __driver_attach(struct device *dev, void *data) + return 0; + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d\n", ret); +- return ret; ++ /* ++ * Driver could not match with device, but may match with ++ * another device on the bus. 
++ */ ++ return 0; + } /* ret > 0 means positive match */ + + if (driver_allows_async_probing(drv)) { +diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c +index 4a42186ff1112..083459028a4b8 100644 +--- a/drivers/bus/mhi/host/pm.c ++++ b/drivers/bus/mhi/host/pm.c +@@ -301,7 +301,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) + read_lock_irq(&mhi_chan->lock); + + /* Only ring DB if ring is not empty */ +- if (tre_ring->base && tre_ring->wp != tre_ring->rp) ++ if (tre_ring->base && tre_ring->wp != tre_ring->rp && ++ mhi_chan->ch_state == MHI_CH_STATE_ENABLED) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index d5ee52be176d3..5d403fb5bd929 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -1330,6 +1330,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) + unsigned long flags; + struct cmd_rcvr *rcvr; + struct cmd_rcvr *rcvrs = NULL; ++ struct module *owner; + + if (!acquire_ipmi_user(user, &i)) { + /* +@@ -1392,8 +1393,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user) + kfree(rcvr); + } + ++ owner = intf->owner; + kref_put(&intf->refcount, intf_free); +- module_put(intf->owner); ++ module_put(owner); + } + + int ipmi_destroy_user(struct ipmi_user *user) +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 6e357ad76f2eb..abddd7e43a9a6 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -2153,6 +2153,20 @@ skip_fallback_noirq: + } + module_init(init_ipmi_si); + ++static void wait_msg_processed(struct smi_info *smi_info) ++{ ++ unsigned long jiffies_now; ++ long time_diff; ++ ++ while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { ++ jiffies_now = jiffies; ++ time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) ++ * SI_USEC_PER_JIFFY); ++ smi_event_handler(smi_info, time_diff); ++ schedule_timeout_uninterruptible(1); ++ } ++} ++ + static void shutdown_smi(void *send_info) + { + struct smi_info *smi_info = send_info; +@@ -2187,16 +2201,13 @@ static void shutdown_smi(void *send_info) + * in the BMC. Note that timers and CPU interrupts are off, + * so no need for locks. + */ +- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { +- poll(smi_info); +- schedule_timeout_uninterruptible(1); +- } ++ wait_msg_processed(smi_info); ++ + if (smi_info->handlers) + disable_si_irq(smi_info); +- while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { +- poll(smi_info); +- schedule_timeout_uninterruptible(1); +- } ++ ++ wait_msg_processed(smi_info); ++ + if (smi_info->handlers) + smi_info->handlers->cleanup(smi_info->si_sm); + +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 69754155300ea..f5868dddbb618 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -160,6 +160,9 @@ EXPORT_SYMBOL(wait_for_random_bytes); + * u8 get_random_u8() + * u16 get_random_u16() + * u32 get_random_u32() ++ * u32 get_random_u32_below(u32 ceil) ++ * u32 get_random_u32_above(u32 floor) ++ * u32 get_random_u32_inclusive(u32 floor, u32 ceil) + * u64 get_random_u64() + * unsigned long get_random_long() + * +@@ -510,6 +513,41 @@ DEFINE_BATCHED_ENTROPY(u16) + DEFINE_BATCHED_ENTROPY(u32) + DEFINE_BATCHED_ENTROPY(u64) + ++u32 __get_random_u32_below(u32 ceil) ++{ ++ /* ++ * This is the slow path for variable ceil. 
It is still fast, most of ++ * the time, by doing traditional reciprocal multiplication and ++ * opportunistically comparing the lower half to ceil itself, before ++ * falling back to computing a larger bound, and then rejecting samples ++ * whose lower half would indicate a range indivisible by ceil. The use ++ * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable ++ * in 32-bits. ++ */ ++ u32 rand = get_random_u32(); ++ u64 mult; ++ ++ /* ++ * This function is technically undefined for ceil == 0, and in fact ++ * for the non-underscored constant version in the header, we build bug ++ * on that. But for the non-constant case, it's convenient to have that ++ * evaluate to being a straight call to get_random_u32(), so that ++ * get_random_u32_inclusive() can work over its whole range without ++ * undefined behavior. ++ */ ++ if (unlikely(!ceil)) ++ return rand; ++ ++ mult = (u64)ceil * rand; ++ if (unlikely((u32)mult < ceil)) { ++ u32 bound = -ceil % ceil; ++ while (unlikely((u32)mult < bound)) ++ mult = (u64)ceil * get_random_u32(); ++ } ++ return mult >> 32; ++} ++EXPORT_SYMBOL(__get_random_u32_below); ++ + #ifdef CONFIG_SMP + /* + * This function is called when the CPU is coming up, with entry +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 69b3d61852ac6..7e56a42750ea5 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1207,6 +1207,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) + if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) + goto err_free_rcpumask; + ++ init_completion(&policy->kobj_unregister); + ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, + cpufreq_global_kobject, "policy%u", cpu); + if (ret) { +@@ -1245,7 +1246,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) + init_rwsem(&policy->rwsem); + spin_lock_init(&policy->transition_lock); + init_waitqueue_head(&policy->transition_wait); +- init_completion(&policy->kobj_unregister); + INIT_WORK(&policy->update, handle_update); + + policy->cpu = cpu; +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig +index c30b5a39c2ac2..4a618d80e106f 100644 +--- a/drivers/crypto/Kconfig ++++ b/drivers/crypto/Kconfig +@@ -790,8 +790,8 @@ config CRYPTO_DEV_CCREE + select CRYPTO_ECB + select CRYPTO_CTR + select CRYPTO_XTS +- select CRYPTO_SM4 +- select CRYPTO_SM3 ++ select CRYPTO_SM4_GENERIC ++ select CRYPTO_SM3_GENERIC + help + Say 'Y' to enable a driver for the REE interface of the Arm + TrustZone CryptoCell family of processors. 
Currently the +diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c +index 792d6da7f0c07..084d052fddccb 100644 +--- a/drivers/crypto/ccp/sp-pci.c ++++ b/drivers/crypto/ccp/sp-pci.c +@@ -381,6 +381,15 @@ static const struct psp_vdata pspv3 = { + .inten_reg = 0x10690, + .intsts_reg = 0x10694, + }; ++ ++static const struct psp_vdata pspv4 = { ++ .sev = &sevv2, ++ .tee = &teev1, ++ .feature_reg = 0x109fc, ++ .inten_reg = 0x10690, ++ .intsts_reg = 0x10694, ++}; ++ + #endif + + static const struct sp_dev_vdata dev_vdata[] = { +@@ -426,7 +435,7 @@ static const struct sp_dev_vdata dev_vdata[] = { + { /* 5 */ + .bar = 2, + #ifdef CONFIG_CRYPTO_DEV_SP_PSP +- .psp_vdata = &pspv2, ++ .psp_vdata = &pspv4, + #endif + }, + { /* 6 */ +diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig +index 27e1fa9120639..743ce4fc3158c 100644 +--- a/drivers/crypto/hisilicon/Kconfig ++++ b/drivers/crypto/hisilicon/Kconfig +@@ -26,7 +26,7 @@ config CRYPTO_DEV_HISI_SEC2 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 +- select CRYPTO_SM4 ++ select CRYPTO_SM4_GENERIC + depends on PCI && PCI_MSI + depends on UACCE || UACCE=n + depends on ARM64 || (COMPILE_TEST && 64BIT) +diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c +index 31e24df18877f..20d0dcd50344b 100644 +--- a/drivers/crypto/n2_core.c ++++ b/drivers/crypto/n2_core.c +@@ -1229,6 +1229,7 @@ struct n2_hash_tmpl { + const u8 *hash_init; + u8 hw_op_hashsz; + u8 digest_size; ++ u8 statesize; + u8 block_size; + u8 auth_type; + u8 hmac_type; +@@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { + .hmac_type = AUTH_TYPE_HMAC_MD5, + .hw_op_hashsz = MD5_DIGEST_SIZE, + .digest_size = MD5_DIGEST_SIZE, ++ .statesize = sizeof(struct md5_state), + .block_size = MD5_HMAC_BLOCK_SIZE }, + { .name = "sha1", + .hash_zero = sha1_zero_message_hash, +@@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { + .hmac_type = AUTH_TYPE_HMAC_SHA1, + .hw_op_hashsz = SHA1_DIGEST_SIZE, + .digest_size = SHA1_DIGEST_SIZE, ++ .statesize = sizeof(struct sha1_state), + .block_size = SHA1_BLOCK_SIZE }, + { .name = "sha256", + .hash_zero = sha256_zero_message_hash, +@@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { + .hmac_type = AUTH_TYPE_HMAC_SHA256, + .hw_op_hashsz = SHA256_DIGEST_SIZE, + .digest_size = SHA256_DIGEST_SIZE, ++ .statesize = sizeof(struct sha256_state), + .block_size = SHA256_BLOCK_SIZE }, + { .name = "sha224", + .hash_zero = sha224_zero_message_hash, +@@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { + .hmac_type = AUTH_TYPE_RESERVED, + .hw_op_hashsz = SHA256_DIGEST_SIZE, + .digest_size = SHA224_DIGEST_SIZE, ++ .statesize = sizeof(struct sha256_state), + .block_size = SHA224_BLOCK_SIZE }, + }; + #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) +@@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) + + halg = &ahash->halg; + halg->digestsize = tmpl->digest_size; ++ halg->statesize = tmpl->statesize; + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); +diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c +index f9ae5ad284ffb..c4f32c32dfd50 100644 +--- a/drivers/cxl/core/region.c ++++ b/drivers/cxl/core/region.c +@@ -1226,7 +1226,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled_target; + struct cxl_memdev *cxlmd_target; + +- cxled_target = p->targets[pos]; ++ cxled_target = p->targets[i]; + if 
(!cxled_target) + continue; + +@@ -1923,6 +1923,9 @@ static int cxl_region_probe(struct device *dev) + */ + up_read(&cxl_region_rwsem); + ++ if (rc) ++ return rc; ++ + switch (cxlr->mode) { + case CXL_DECODER_PMEM: + return devm_cxl_add_pmem_region(cxlr); +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c +index 63347a5ae5999..8c5f6f7fca112 100644 +--- a/drivers/devfreq/devfreq.c ++++ b/drivers/devfreq/devfreq.c +@@ -776,8 +776,7 @@ static void remove_sysfs_files(struct devfreq *devfreq, + * @dev: the device to add devfreq feature. + * @profile: device-specific profile to run devfreq. + * @governor_name: name of the policy to choose frequency. +- * @data: private data for the governor. The devfreq framework does not +- * touch this value. ++ * @data: devfreq driver pass to governors, governor should not change it. + */ + struct devfreq *devfreq_add_device(struct device *dev, + struct devfreq_dev_profile *profile, +@@ -1011,8 +1010,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res) + * @dev: the device to add devfreq feature. + * @profile: device-specific profile to run devfreq. + * @governor_name: name of the policy to choose frequency. +- * @data: private data for the governor. The devfreq framework does not +- * touch this value. ++ * @data: devfreq driver pass to governors, governor should not change it. + * + * This function manages automatically the memory of devfreq device using device + * resource management and simplify the free operation for memory of devfreq +diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c +index ab9db7adb3ade..d69672ccacc49 100644 +--- a/drivers/devfreq/governor_userspace.c ++++ b/drivers/devfreq/governor_userspace.c +@@ -21,7 +21,7 @@ struct userspace_data { + + static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) + { +- struct userspace_data *data = df->data; ++ struct userspace_data *data = df->governor_data; + + if (data->valid) + *freq = data->user_frequency; +@@ -40,7 +40,7 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, + int err = 0; + + mutex_lock(&devfreq->lock); +- data = devfreq->data; ++ data = devfreq->governor_data; + + sscanf(buf, "%lu", &wanted); + data->user_frequency = wanted; +@@ -60,7 +60,7 @@ static ssize_t set_freq_show(struct device *dev, + int err = 0; + + mutex_lock(&devfreq->lock); +- data = devfreq->data; ++ data = devfreq->governor_data; + + if (data->valid) + err = sprintf(buf, "%lu\n", data->user_frequency); +@@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq) + goto out; + } + data->valid = false; +- devfreq->data = data; ++ devfreq->governor_data = data; + + err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); + out: +@@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq) + if (devfreq->dev.kobj.sd) + sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); + +- kfree(devfreq->data); +- devfreq->data = NULL; ++ kfree(devfreq->governor_data); ++ devfreq->governor_data = NULL; + } + + static int devfreq_userspace_handler(struct devfreq *devfreq, +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +index 0a638c97702a5..15f63452a9bec 100644 +--- a/drivers/edac/edac_mc_sysfs.c ++++ b/drivers/edac/edac_mc_sysfs.c +@@ -298,6 +298,14 @@ DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 6); + DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, 
channel_dimm_label_store, 7); ++DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 8); ++DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 9); ++DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 10); ++DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 11); + + /* Total possible dynamic DIMM Label attribute file table */ + static struct attribute *dynamic_csrow_dimm_attr[] = { +@@ -309,6 +317,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { + &dev_attr_legacy_ch5_dimm_label.attr.attr, + &dev_attr_legacy_ch6_dimm_label.attr.attr, + &dev_attr_legacy_ch7_dimm_label.attr.attr, ++ &dev_attr_legacy_ch8_dimm_label.attr.attr, ++ &dev_attr_legacy_ch9_dimm_label.attr.attr, ++ &dev_attr_legacy_ch10_dimm_label.attr.attr, ++ &dev_attr_legacy_ch11_dimm_label.attr.attr, + NULL + }; + +@@ -329,6 +341,14 @@ DEVICE_CHANNEL(ch6_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 6); + DEVICE_CHANNEL(ch7_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 7); ++DEVICE_CHANNEL(ch8_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 8); ++DEVICE_CHANNEL(ch9_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 9); ++DEVICE_CHANNEL(ch10_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 10); ++DEVICE_CHANNEL(ch11_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 11); + + /* Total possible dynamic ce_count attribute file table */ + static struct attribute *dynamic_csrow_ce_count_attr[] = { +@@ -340,6 +360,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { + &dev_attr_legacy_ch5_ce_count.attr.attr, + &dev_attr_legacy_ch6_ce_count.attr.attr, + &dev_attr_legacy_ch7_ce_count.attr.attr, ++ &dev_attr_legacy_ch8_ce_count.attr.attr, ++ &dev_attr_legacy_ch9_ce_count.attr.attr, ++ &dev_attr_legacy_ch10_ce_count.attr.attr, ++ &dev_attr_legacy_ch11_ce_count.attr.attr, + NULL + }; + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 913f22d41673d..0be85d19a6f3e 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -3005,14 +3005,15 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) + continue; + } + +- /* skip suspend of gfx and psp for S0ix ++ /* skip suspend of gfx/mes and psp for S0ix + * gfx is in gfxoff state, so on resume it will exit gfxoff just + * like at runtime. PSP is also part of the always on hardware + * so no need to suspend it. 
+ */ + if (adev->in_s0ix && + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || +- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) ++ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || ++ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) + continue; + + /* XXX handle errors */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +index bf2d50c8c92ad..d8dfbb9b735dc 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +@@ -2040,6 +2040,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, + "See modparam exp_hw_support\n"); + return -ENODEV; + } ++ /* differentiate between P10 and P11 asics with the same DID */ ++ if (pdev->device == 0x67FF && ++ (pdev->revision == 0xE3 || ++ pdev->revision == 0xE7 || ++ pdev->revision == 0xF3 || ++ pdev->revision == 0xF7)) { ++ flags &= ~AMD_ASIC_MASK; ++ flags |= CHIP_POLARIS10; ++ } + + /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, + * however, SME requires an indirect IOMMU mapping because the encryption +@@ -2109,12 +2118,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, + + pci_set_drvdata(pdev, ddev); + +- ret = amdgpu_driver_load_kms(adev, ent->driver_data); ++ ret = amdgpu_driver_load_kms(adev, flags); + if (ret) + goto err_pci; + + retry_init: +- ret = drm_dev_register(ddev, ent->driver_data); ++ ret = drm_dev_register(ddev, flags); + if (ret == -EAGAIN && ++retry <= 3) { + DRM_INFO("retry init %d\n", retry); + /* Don't request EX mode too frequently which is attacking */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +index 2e8f6cd7a7293..3df13d841e4d5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +@@ -1509,7 +1509,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) + uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, + uint32_t domain) + { +- if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { ++ if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) && ++ ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) { + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) + domain = AMDGPU_GEM_DOMAIN_GTT; +diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +index f141fadd2d86f..725876b4f02ed 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +@@ -1339,7 +1339,8 @@ static int mes_v11_0_late_init(void *handle) + { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + +- if (!amdgpu_in_reset(adev) && ++ /* it's only intended for use in mes_self_test case, not for s0ix and reset */ ++ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && + (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) + amdgpu_mes_self_test(adev); + +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +index 998b5d17b271b..0e664d0cc8d51 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +@@ -319,7 +319,7 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) + + tmp = mmMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); +- WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp); ++ WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp); + } + + static void 
mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +index 1b027d069ab40..4638ea7c2eec5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +@@ -243,7 +243,7 @@ static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev) + + tmp = mmMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); +- WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp); ++ WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp); + } + + static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev) +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +index a1d26c4d80b8c..16cc82215e2e1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +@@ -275,7 +275,7 @@ static void mmhub_v3_0_init_cache_regs(struct amdgpu_device *adev) + + tmp = regMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); +- WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp); ++ WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); + } + + static void mmhub_v3_0_enable_system_domain(struct amdgpu_device *adev) +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +index e8058edc1d108..6bdf2ef0298d6 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +@@ -269,7 +269,7 @@ static void mmhub_v3_0_1_init_cache_regs(struct amdgpu_device *adev) + + tmp = regMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); +- WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp); ++ WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); + } + + static void mmhub_v3_0_1_enable_system_domain(struct amdgpu_device *adev) +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +index 770be0a8f7ce7..45465acaa943a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +@@ -268,7 +268,7 @@ static void mmhub_v3_0_2_init_cache_regs(struct amdgpu_device *adev) + + tmp = regMMVM_L2_CNTL5_DEFAULT; + tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); +- WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp); ++ WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); + } + + static void mmhub_v3_0_2_enable_system_domain(struct amdgpu_device *adev) +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 512c32327eb11..c2c26fbea5129 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -1512,6 +1512,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): ++ case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + init_data.flags.gpu_vm_support = true; +diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +index b76f0f7e42998..d6b964cf73bd1 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h ++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +@@ -522,9 +522,9 @@ typedef enum { + TEMP_HOTSPOT_M, + TEMP_MEM, + TEMP_VR_GFX, ++ TEMP_VR_SOC, + TEMP_VR_MEM0, + TEMP_VR_MEM1, 
+- TEMP_VR_SOC, + TEMP_VR_U, + TEMP_LIQUID0, + TEMP_LIQUID1, +diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +index 865d6358918d2..a9122b3b15322 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h ++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +@@ -28,6 +28,7 @@ + #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF + #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 + #define SMU13_DRIVER_IF_VERSION_ALDE 0x08 ++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34 + #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 + #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 + #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +index 89f0f6eb19f3d..8e4830a311bde 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +@@ -289,6 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; + break; + case IP_VERSION(13, 0, 0): ++ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0; ++ break; + case IP_VERSION(13, 0, 10): + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10; + break; +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +index f0121d1716301..b8430601304f0 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +@@ -187,6 +187,8 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = + FEA_MAP(MEM_TEMP_READ), + FEA_MAP(ATHUB_MMHUB_PG), + FEA_MAP(SOC_PCC), ++ [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, ++ [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + }; + + static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { +@@ -517,6 +519,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) + dpm_table); + if (ret) + return ret; ++ ++ /* ++ * Update the reported maximum shader clock to the value ++ * which can be guarded to be achieved on all cards. This ++ * is aligned with Window setting. And considering that value ++ * might be not the peak frequency the card can achieve, it ++ * is normal some real-time clock frequency can overtake this ++ * labelled maximum clock frequency(for example in pp_dpm_sclk ++ * sysfs output). 
++ */ ++ if (skutable->DriverReportedClocks.GameClockAc && ++ (dpm_table->dpm_levels[dpm_table->count - 1].value > ++ skutable->DriverReportedClocks.GameClockAc)) { ++ dpm_table->dpm_levels[dpm_table->count - 1].value = ++ skutable->DriverReportedClocks.GameClockAc; ++ dpm_table->max = skutable->DriverReportedClocks.GameClockAc; ++ } + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; +@@ -779,6 +798,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, + return ret; + } + ++static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu, ++ enum smu_clk_type clk_type, ++ uint32_t *min, ++ uint32_t *max) ++{ ++ struct smu_13_0_dpm_context *dpm_context = ++ smu->smu_dpm.dpm_context; ++ struct smu_13_0_dpm_table *dpm_table; ++ ++ switch (clk_type) { ++ case SMU_MCLK: ++ case SMU_UCLK: ++ /* uclk dpm table */ ++ dpm_table = &dpm_context->dpm_tables.uclk_table; ++ break; ++ case SMU_GFXCLK: ++ case SMU_SCLK: ++ /* gfxclk dpm table */ ++ dpm_table = &dpm_context->dpm_tables.gfx_table; ++ break; ++ case SMU_SOCCLK: ++ /* socclk dpm table */ ++ dpm_table = &dpm_context->dpm_tables.soc_table; ++ break; ++ case SMU_FCLK: ++ /* fclk dpm table */ ++ dpm_table = &dpm_context->dpm_tables.fclk_table; ++ break; ++ case SMU_VCLK: ++ case SMU_VCLK1: ++ /* vclk dpm table */ ++ dpm_table = &dpm_context->dpm_tables.vclk_table; ++ break; ++ case SMU_DCLK: ++ case SMU_DCLK1: ++ /* dclk dpm table */ ++ dpm_table = &dpm_context->dpm_tables.dclk_table; ++ break; ++ default: ++ dev_err(smu->adev->dev, "Unsupported clock type!\n"); ++ return -EINVAL; ++ } ++ ++ if (min) ++ *min = dpm_table->min; ++ if (max) ++ *max = dpm_table->max; ++ ++ return 0; ++} ++ + static int smu_v13_0_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, +@@ -1281,9 +1351,17 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) + &dpm_context->dpm_tables.fclk_table; + struct smu_umd_pstate_table *pstate_table = + &smu->pstate_table; ++ struct smu_table_context *table_context = &smu->smu_table; ++ PPTable_t *pptable = table_context->driver_pptable; ++ DriverReportedClocks_t driver_clocks = ++ pptable->SkuTable.DriverReportedClocks; + + pstate_table->gfxclk_pstate.min = gfx_table->min; +- pstate_table->gfxclk_pstate.peak = gfx_table->max; ++ if (driver_clocks.GameClockAc && ++ (driver_clocks.GameClockAc < gfx_table->max)) ++ pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; ++ else ++ pstate_table->gfxclk_pstate.peak = gfx_table->max; + + pstate_table->uclk_pstate.min = mem_table->min; + pstate_table->uclk_pstate.peak = mem_table->max; +@@ -1300,12 +1378,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) + pstate_table->fclk_pstate.min = fclk_table->min; + pstate_table->fclk_pstate.peak = fclk_table->max; + +- /* +- * For now, just use the mininum clock frequency. 
+- * TODO: update them when the real pstate settings available +- */ +- pstate_table->gfxclk_pstate.standard = gfx_table->min; +- pstate_table->uclk_pstate.standard = mem_table->min; ++ if (driver_clocks.BaseClockAc && ++ driver_clocks.BaseClockAc < gfx_table->max) ++ pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; ++ else ++ pstate_table->gfxclk_pstate.standard = gfx_table->max; ++ pstate_table->uclk_pstate.standard = mem_table->max; + pstate_table->socclk_pstate.standard = soc_table->min; + pstate_table->vclk_pstate.standard = vclk_table->min; + pstate_table->dclk_pstate.standard = dclk_table->min; +@@ -1339,12 +1417,23 @@ out: + static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu, + uint32_t *speed) + { ++ int ret; ++ + if (!speed) + return -EINVAL; + +- return smu_v13_0_0_get_smu_metrics_data(smu, +- METRICS_CURR_FANPWM, +- speed); ++ ret = smu_v13_0_0_get_smu_metrics_data(smu, ++ METRICS_CURR_FANPWM, ++ speed); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); ++ return ret; ++ } ++ ++ /* Convert the PMFW output which is in percent to pwm(255) based */ ++ *speed = MIN(*speed * 255 / 100, 255); ++ ++ return 0; + } + + static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu, +@@ -1813,7 +1902,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { + .get_enabled_mask = smu_cmn_get_enabled_mask, + .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, + .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, +- .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, ++ .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq, + .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, + .read_sensor = smu_v13_0_0_read_sensor, + .feature_is_enabled = smu_cmn_feature_is_enabled, +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +index 39deb06a86ba3..222924363a681 100644 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +@@ -189,6 +189,8 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = + FEA_MAP(MEM_TEMP_READ), + FEA_MAP(ATHUB_MMHUB_PG), + FEA_MAP(SOC_PCC), ++ [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, ++ [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + }; + + static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { +@@ -1359,12 +1361,23 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) + static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu, + uint32_t *speed) + { ++ int ret; ++ + if (!speed) + return -EINVAL; + +- return smu_v13_0_7_get_smu_metrics_data(smu, +- METRICS_CURR_FANPWM, +- speed); ++ ret = smu_v13_0_7_get_smu_metrics_data(smu, ++ METRICS_CURR_FANPWM, ++ speed); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); ++ return ret; ++ } ++ ++ /* Convert the PMFW output which is in percent to pwm(255) based */ ++ *speed = MIN(*speed * 255 / 100, 255); ++ ++ return 0; + } + + static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu, +diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c +index 61c29ce74b035..27de2a97f1d11 100644 +--- a/drivers/gpu/drm/drm_connector.c ++++ b/drivers/gpu/drm/drm_connector.c +@@ -582,6 +582,9 @@ void drm_connector_cleanup(struct drm_connector *connector) + mutex_destroy(&connector->mutex); + + memset(connector, 0, sizeof(*connector)); ++ ++ if (dev->registered) ++ 
drm_sysfs_hotplug_event(dev); + } + EXPORT_SYMBOL(drm_connector_cleanup); + +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c +index cc386f8a7116e..5cf13e52f7c94 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c +@@ -258,7 +258,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( + if (mapping->use == 0) { + mutex_lock(&mmu_context->lock); + if (mapping->context == mmu_context) +- mapping->use += 1; ++ if (va && mapping->iova != va) { ++ etnaviv_iommu_reap_mapping(mapping); ++ mapping = NULL; ++ } else { ++ mapping->use += 1; ++ } + else + mapping = NULL; + mutex_unlock(&mmu_context->lock); +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +index dc1aa738c4f18..55479cb8b1ac3 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +@@ -135,6 +135,19 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, + drm_mm_remove_node(&mapping->vram_node); + } + ++void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) ++{ ++ struct etnaviv_iommu_context *context = mapping->context; ++ ++ lockdep_assert_held(&context->lock); ++ WARN_ON(mapping->use); ++ ++ etnaviv_iommu_remove_mapping(context, mapping); ++ etnaviv_iommu_context_put(mapping->context); ++ mapping->context = NULL; ++ list_del_init(&mapping->mmu_node); ++} ++ + static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, + struct drm_mm_node *node, size_t size) + { +@@ -202,10 +215,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, + * this mapping. + */ + list_for_each_entry_safe(m, n, &list, scan_node) { +- etnaviv_iommu_remove_mapping(context, m); +- etnaviv_iommu_context_put(m->context); +- m->context = NULL; +- list_del_init(&m->mmu_node); ++ etnaviv_iommu_reap_mapping(m); + list_del_init(&m->scan_node); + } + +@@ -257,10 +267,7 @@ static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context, + } + + list_for_each_entry_safe(m, n, &scan_list, scan_node) { +- etnaviv_iommu_remove_mapping(context, m); +- etnaviv_iommu_context_put(m->context); +- m->context = NULL; +- list_del_init(&m->mmu_node); ++ etnaviv_iommu_reap_mapping(m); + list_del_init(&m->scan_node); + } + +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +index e4a0b7d09c2ea..c01a147f0dfdd 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +@@ -91,6 +91,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, + struct etnaviv_vram_mapping *mapping, u64 va); + void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, + struct etnaviv_vram_mapping *mapping); ++void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping); + + int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx, + struct etnaviv_vram_mapping *mapping, +diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +index 75e8cc4337c93..fce69fa446d58 100644 +--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c ++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +@@ -137,9 +137,9 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, + return ffs(intel_dsi->ports) - 1; + + if (seq_port) { +- if (intel_dsi->ports & PORT_B) ++ if (intel_dsi->ports & BIT(PORT_B)) + return PORT_B; +- else if (intel_dsi->ports & PORT_C) ++ else if (intel_dsi->ports & 
BIT(PORT_C)) + return PORT_C; + } + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +index 845023c14eb36..f461e34cc5f07 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +@@ -729,32 +729,69 @@ static int eb_reserve(struct i915_execbuffer *eb) + bool unpinned; + + /* +- * Attempt to pin all of the buffers into the GTT. +- * This is done in 2 phases: ++ * We have one more buffers that we couldn't bind, which could be due to ++ * various reasons. To resolve this we have 4 passes, with every next ++ * level turning the screws tighter: + * +- * 1. Unbind all objects that do not match the GTT constraints for +- * the execbuffer (fenceable, mappable, alignment etc). +- * 2. Bind new objects. ++ * 0. Unbind all objects that do not match the GTT constraints for the ++ * execbuffer (fenceable, mappable, alignment etc). Bind all new ++ * objects. This avoids unnecessary unbinding of later objects in order ++ * to make room for the earlier objects *unless* we need to defragment. + * +- * This avoid unnecessary unbinding of later objects in order to make +- * room for the earlier objects *unless* we need to defragment. ++ * 1. Reorder the buffers, where objects with the most restrictive ++ * placement requirements go first (ignoring fixed location buffers for ++ * now). For example, objects needing the mappable aperture (the first ++ * 256M of GTT), should go first vs objects that can be placed just ++ * about anywhere. Repeat the previous pass. + * +- * Defragmenting is skipped if all objects are pinned at a fixed location. ++ * 2. Consider buffers that are pinned at a fixed location. Also try to ++ * evict the entire VM this time, leaving only objects that we were ++ * unable to lock. Try again to bind the buffers. (still using the new ++ * buffer order). ++ * ++ * 3. We likely have object lock contention for one or more stubborn ++ * objects in the VM, for which we need to evict to make forward ++ * progress (perhaps we are fighting the shrinker?). When evicting the ++ * VM this time around, anything that we can't lock we now track using ++ * the busy_bo, using the full lock (after dropping the vm->mutex to ++ * prevent deadlocks), instead of trylock. We then continue to evict the ++ * VM, this time with the stubborn object locked, which we can now ++ * hopefully unbind (if still bound in the VM). Repeat until the VM is ++ * evicted. Finally we should be able bind everything. 
+ */ +- for (pass = 0; pass <= 2; pass++) { ++ for (pass = 0; pass <= 3; pass++) { + int pin_flags = PIN_USER | PIN_VALIDATE; + + if (pass == 0) + pin_flags |= PIN_NONBLOCK; + + if (pass >= 1) +- unpinned = eb_unbind(eb, pass == 2); ++ unpinned = eb_unbind(eb, pass >= 2); + + if (pass == 2) { + err = mutex_lock_interruptible(&eb->context->vm->mutex); + if (!err) { +- err = i915_gem_evict_vm(eb->context->vm, &eb->ww); ++ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL); ++ mutex_unlock(&eb->context->vm->mutex); ++ } ++ if (err) ++ return err; ++ } ++ ++ if (pass == 3) { ++retry: ++ err = mutex_lock_interruptible(&eb->context->vm->mutex); ++ if (!err) { ++ struct drm_i915_gem_object *busy_bo = NULL; ++ ++ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo); + mutex_unlock(&eb->context->vm->mutex); ++ if (err && busy_bo) { ++ err = i915_gem_object_lock(busy_bo, &eb->ww); ++ i915_gem_object_put(busy_bo); ++ if (!err) ++ goto retry; ++ } + } + if (err) + return err; +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c +index e63329bc80659..354c1d6dab846 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c +@@ -369,7 +369,7 @@ retry: + if (vma == ERR_PTR(-ENOSPC)) { + ret = mutex_lock_interruptible(&ggtt->vm.mutex); + if (!ret) { +- ret = i915_gem_evict_vm(&ggtt->vm, &ww); ++ ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); + mutex_unlock(&ggtt->vm.mutex); + } + if (ret) +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c +index 369006c5317f2..a40bc17acead8 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c +@@ -761,6 +761,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj) + if (!HAS_FLAT_CCS(to_i915(obj->base.dev))) + return false; + ++ if (obj->flags & I915_BO_ALLOC_CCS_AUX) ++ return true; ++ + for (i = 0; i < obj->mm.n_placements; i++) { + /* Compression is not allowed for the objects with smem placement */ + if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM) +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +index d0d6772e6f36a..ab4c2f90a5643 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +@@ -327,16 +327,18 @@ struct drm_i915_gem_object { + * dealing with userspace objects the CPU fault handler is free to ignore this. + */ + #define I915_BO_ALLOC_GPU_ONLY BIT(6) ++#define I915_BO_ALLOC_CCS_AUX BIT(7) + #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \ + I915_BO_ALLOC_VOLATILE | \ + I915_BO_ALLOC_CPU_CLEAR | \ + I915_BO_ALLOC_USER | \ + I915_BO_ALLOC_PM_VOLATILE | \ + I915_BO_ALLOC_PM_EARLY | \ +- I915_BO_ALLOC_GPU_ONLY) +-#define I915_BO_READONLY BIT(7) +-#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */ +-#define I915_BO_PROTECTED BIT(9) ++ I915_BO_ALLOC_GPU_ONLY | \ ++ I915_BO_ALLOC_CCS_AUX) ++#define I915_BO_READONLY BIT(8) ++#define I915_TILING_QUIRK_BIT 9 /* unknown swizzling; do not release! 
*/ ++#define I915_BO_PROTECTED BIT(10) + /** + * @mem_flags - Mutable placement-related flags + * +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +index 07e49f22f2de3..7e67742bc65e0 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +@@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, + container_of(bo->bdev, typeof(*i915), bdev); + struct drm_i915_gem_object *backup; + struct ttm_operation_ctx ctx = {}; ++ unsigned int flags; + int err = 0; + + if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup) +@@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, + if (obj->flags & I915_BO_ALLOC_PM_VOLATILE) + return 0; + +- backup = i915_gem_object_create_shmem(i915, obj->base.size); ++ /* ++ * It seems that we might have some framebuffers still pinned at this ++ * stage, but for such objects we might also need to deal with the CCS ++ * aux state. Make sure we force the save/restore of the CCS state, ++ * otherwise we might observe display corruption, when returning from ++ * suspend. ++ */ ++ flags = 0; ++ if (i915_gem_object_needs_ccs_pages(obj)) { ++ WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj)); ++ WARN_ON_ONCE(!pm_apply->allow_gpu); ++ ++ flags = I915_BO_ALLOC_CCS_AUX; ++ } ++ backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM], ++ obj->base.size, 0, flags); + if (IS_ERR(backup)) + return PTR_ERR(backup); + +diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c +index aaaf1906026c1..ee072c7d62eb1 100644 +--- a/drivers/gpu/drm/i915/gt/intel_migrate.c ++++ b/drivers/gpu/drm/i915/gt/intel_migrate.c +@@ -341,6 +341,16 @@ static int emit_no_arbitration(struct i915_request *rq) + return 0; + } + ++static int max_pte_pkt_size(struct i915_request *rq, int pkt) ++{ ++ struct intel_ring *ring = rq->ring; ++ ++ pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5); ++ pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); ++ ++ return pkt; ++} ++ + static int emit_pte(struct i915_request *rq, + struct sgt_dma *it, + enum i915_cache_level cache_level, +@@ -387,8 +397,7 @@ static int emit_pte(struct i915_request *rq, + return PTR_ERR(cs); + + /* Pack as many PTE updates as possible into a single MI command */ +- pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5); +- pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); ++ pkt = max_pte_pkt_size(rq, dword_length); + + hdr = cs; + *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */ +@@ -421,8 +430,7 @@ static int emit_pte(struct i915_request *rq, + } + } + +- pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5); +- pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); ++ pkt = max_pte_pkt_size(rq, dword_rem); + + hdr = cs; + *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); +diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c +index f025ee4fa5261..a4b4d9b7d26c7 100644 +--- a/drivers/gpu/drm/i915/i915_gem_evict.c ++++ b/drivers/gpu/drm/i915/i915_gem_evict.c +@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, + * @vm: Address space to cleanse + * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm + * will be able to evict vma's locked by the ww as well. ++ * @busy_bo: Optional pointer to struct drm_i915_gem_object. 
If not NULL, then ++ * in the event i915_gem_evict_vm() is unable to trylock an object for eviction, ++ * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop ++ * the vm->mutex, before trying again to acquire the contended lock. The caller ++ * also owns a reference to the object. + * + * This function evicts all vmas from a vm. + * +@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, + * To clarify: This is for freeing up virtual address space, not for freeing + * memory in e.g. the shrinker. + */ +-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) ++int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, ++ struct drm_i915_gem_object **busy_bo) + { + int ret = 0; + +@@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) + * the resv is shared among multiple objects, we still + * need the object ref. + */ +- if (dying_vma(vma) || ++ if (!i915_gem_object_get_rcu(vma->obj) || + (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) { + __i915_vma_pin(vma); + list_add(&vma->evict_link, &locked_eviction_list); + continue; + } + +- if (!i915_gem_object_trylock(vma->obj, ww)) ++ if (!i915_gem_object_trylock(vma->obj, ww)) { ++ if (busy_bo) { ++ *busy_bo = vma->obj; /* holds ref */ ++ ret = -EBUSY; ++ break; ++ } ++ i915_gem_object_put(vma->obj); + continue; ++ } + + __i915_vma_pin(vma); + list_add(&vma->evict_link, &eviction_list); +@@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) + if (list_empty(&eviction_list) && list_empty(&locked_eviction_list)) + break; + +- ret = 0; + /* Unbind locked objects first, before unlocking the eviction_list */ + list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) { + __i915_vma_unpin(vma); + +- if (ret == 0) ++ if (ret == 0) { + ret = __i915_vma_unbind(vma); +- if (ret != -EINTR) /* "Get me out of here!" */ +- ret = 0; ++ if (ret != -EINTR) /* "Get me out of here!" */ ++ ret = 0; ++ } ++ if (!dying_vma(vma)) ++ i915_gem_object_put(vma->obj); + } + + list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) { + __i915_vma_unpin(vma); +- if (ret == 0) ++ if (ret == 0) { + ret = __i915_vma_unbind(vma); +- if (ret != -EINTR) /* "Get me out of here!" */ +- ret = 0; ++ if (ret != -EINTR) /* "Get me out of here!" 
*/ ++ ret = 0; ++ } + + i915_gem_object_unlock(vma->obj); ++ i915_gem_object_put(vma->obj); + } + } while (ret == 0); + +diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h +index e593c530f9bd7..bf0ee0e4fe608 100644 +--- a/drivers/gpu/drm/i915/i915_gem_evict.h ++++ b/drivers/gpu/drm/i915/i915_gem_evict.h +@@ -11,6 +11,7 @@ + struct drm_mm_node; + struct i915_address_space; + struct i915_gem_ww_ctx; ++struct drm_i915_gem_object; + + int __must_check i915_gem_evict_something(struct i915_address_space *vm, + struct i915_gem_ww_ctx *ww, +@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, + struct drm_mm_node *node, + unsigned int flags); + int i915_gem_evict_vm(struct i915_address_space *vm, +- struct i915_gem_ww_ctx *ww); ++ struct i915_gem_ww_ctx *ww, ++ struct drm_i915_gem_object **busy_bo); + + #endif /* __I915_GEM_EVICT_H__ */ +diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c +index f17c09ead7d77..4d06875de14a1 100644 +--- a/drivers/gpu/drm/i915/i915_vma.c ++++ b/drivers/gpu/drm/i915/i915_vma.c +@@ -1569,7 +1569,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, + * locked objects when called from execbuf when pinning + * is removed. This would probably regress badly. + */ +- i915_gem_evict_vm(vm, NULL); ++ i915_gem_evict_vm(vm, NULL, NULL); + mutex_unlock(&vm->mutex); + } + } while (1); +diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +index 8c6517d29b8e0..37068542aafe7 100644 +--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c ++++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +@@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg) + + /* Everything is pinned, nothing should happen */ + mutex_lock(&ggtt->vm.mutex); +- err = i915_gem_evict_vm(&ggtt->vm, NULL); ++ err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL); + mutex_unlock(&ggtt->vm.mutex); + if (err) { + pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", +@@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg) + + for_i915_gem_ww(&ww, err, false) { + mutex_lock(&ggtt->vm.mutex); +- err = i915_gem_evict_vm(&ggtt->vm, &ww); ++ err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); + mutex_unlock(&ggtt->vm.mutex); + } + +diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +index ab0515d2c420a..4499a04f7c138 100644 +--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c ++++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +@@ -1629,7 +1629,11 @@ static int ingenic_drm_init(void) + return err; + } + +- return platform_driver_register(&ingenic_drm_driver); ++ err = platform_driver_register(&ingenic_drm_driver); ++ if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err) ++ platform_driver_unregister(ingenic_ipu_driver_ptr); ++ ++ return err; + } + module_init(ingenic_drm_init); + +diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c +index be389ed91cbd8..bd6e573c9a1a3 100644 +--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c ++++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c +@@ -284,7 +284,8 @@ static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc, + pixpllcp = pixpllc->p - 1; + pixpllcs = pixpllc->s; + +- xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1); ++ // For G200SE A, BIT(7) should be set unconditionally. 
++ xpixpllcm = BIT(7) | pixpllcm; + xpixpllcn = pixpllcn; + xpixpllcp = (pixpllcs << 3) | pixpllcp; + +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index 214829c32ed87..7a2f262414ad4 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -308,7 +308,8 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, + if (cmd->dma.guest.ptr.offset % PAGE_SIZE || + box->x != 0 || box->y != 0 || box->z != 0 || + box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || +- box->d != 1 || box_count != 1) { ++ box->d != 1 || box_count != 1 || ++ box->w > 64 || box->h > 64) { + /* TODO handle none page aligned offsets */ + /* TODO handle more dst & src != 0 */ + /* TODO handle more then one copy */ +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index e27fb27a36bfa..82713ef3aaa64 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -412,6 +412,7 @@ + #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 + #define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05 + #define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF ++#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9 + #define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817 + #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 + #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c +index d728a94c642eb..3ee5a9fea20e6 100644 +--- a/drivers/hid/hid-input.c ++++ b/drivers/hid/hid-input.c +@@ -380,6 +380,8 @@ static const struct hid_device_id hid_battery_quirks[] = { + HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100), + HID_BATTERY_QUIRK_IGNORE }, ++ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV), ++ HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15), + HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN), +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c +index 1a2d425bf5687..34029d1161073 100644 +--- a/drivers/iommu/amd/init.c ++++ b/drivers/iommu/amd/init.c +@@ -3402,18 +3402,24 @@ static int __init parse_amd_iommu_options(char *str) + static int __init parse_ivrs_ioapic(char *str) + { + u32 seg = 0, bus, dev, fn; +- int ret, id, i; ++ int id, i; + u32 devid; + +- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); +- if (ret != 4) { +- ret = sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn); +- if (ret != 5) { +- pr_err("Invalid command line: ivrs_ioapic%s\n", str); +- return 1; +- } ++ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || ++ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) ++ goto found; ++ ++ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || ++ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { ++ pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n", ++ str, id, seg, bus, dev, fn); ++ goto found; + } + ++ pr_err("Invalid command line: ivrs_ioapic%s\n", str); ++ return 1; ++ ++found: + if (early_ioapic_map_size == EARLY_MAP_SIZE) { + pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", + str); +@@ -3434,18 +3440,24 @@ static int __init parse_ivrs_ioapic(char *str) + static int __init parse_ivrs_hpet(char *str) + { + u32 seg = 0, bus, dev, fn; +- int ret, id, i; ++ int id, i; + u32 devid; + +- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); +- if (ret != 4) { +- ret = 
sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn); +- if (ret != 5) { +- pr_err("Invalid command line: ivrs_hpet%s\n", str); +- return 1; +- } ++ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 || ++ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) ++ goto found; ++ ++ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 || ++ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) { ++ pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n", ++ str, id, seg, bus, dev, fn); ++ goto found; + } + ++ pr_err("Invalid command line: ivrs_hpet%s\n", str); ++ return 1; ++ ++found: + if (early_hpet_map_size == EARLY_MAP_SIZE) { + pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n", + str); +@@ -3466,19 +3478,36 @@ static int __init parse_ivrs_hpet(char *str) + static int __init parse_ivrs_acpihid(char *str) + { + u32 seg = 0, bus, dev, fn; +- char *hid, *uid, *p; ++ char *hid, *uid, *p, *addr; + char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; +- int ret, i; +- +- ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); +- if (ret != 4) { +- ret = sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid); +- if (ret != 5) { +- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str); +- return 1; ++ int i; ++ ++ addr = strchr(str, '@'); ++ if (!addr) { ++ if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 || ++ sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) { ++ pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n", ++ str, acpiid, seg, bus, dev, fn); ++ goto found; + } ++ goto not_found; + } + ++ /* We have the '@', make it the terminator to get just the acpiid */ ++ *addr++ = 0; ++ ++ if (sscanf(str, "=%s", acpiid) != 1) ++ goto not_found; ++ ++ if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 || ++ sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4) ++ goto found; ++ ++not_found: ++ pr_err("Invalid command line: ivrs_acpihid%s\n", str); ++ return 1; ++ ++found: + p = acpiid; + hid = strsep(&p, ":"); + uid = p; +@@ -3488,6 +3517,13 @@ static int __init parse_ivrs_acpihid(char *str) + return 1; + } + ++ /* ++ * Ignore leading zeroes after ':', so e.g., AMDI0095:00 ++ * will match AMDI0095:0 in the second strcmp in acpi_dev_hid_uid_match ++ */ ++ while (*uid == '0' && *(uid + 1)) ++ uid++; ++ + i = early_acpihid_map_size++; + memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); + memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c +index ab13b73802650..83a5975bcc729 100644 +--- a/drivers/md/dm-cache-metadata.c ++++ b/drivers/md/dm-cache-metadata.c +@@ -551,11 +551,13 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, + return r; + } + +-static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd) ++static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd, ++ bool destroy_bm) + { + dm_sm_destroy(cmd->metadata_sm); + dm_tm_destroy(cmd->tm); +- dm_block_manager_destroy(cmd->bm); ++ if (destroy_bm) ++ dm_block_manager_destroy(cmd->bm); + } + + typedef unsigned long (*flags_mutator)(unsigned long); +@@ -826,7 +828,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, + cmd2 = lookup(bdev); + if (cmd2) { + mutex_unlock(&table_lock); +- __destroy_persistent_data_objects(cmd); ++ __destroy_persistent_data_objects(cmd, true); + kfree(cmd); + return 
cmd2; + } +@@ -874,7 +876,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd) + mutex_unlock(&table_lock); + + if (!cmd->fail_io) +- __destroy_persistent_data_objects(cmd); ++ __destroy_persistent_data_objects(cmd, true); + kfree(cmd); + } + } +@@ -1807,14 +1809,52 @@ int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result) + + int dm_cache_metadata_abort(struct dm_cache_metadata *cmd) + { +- int r; ++ int r = -EINVAL; ++ struct dm_block_manager *old_bm = NULL, *new_bm = NULL; ++ ++ /* fail_io is double-checked with cmd->root_lock held below */ ++ if (unlikely(cmd->fail_io)) ++ return r; ++ ++ /* ++ * Replacement block manager (new_bm) is created and old_bm destroyed outside of ++ * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of ++ * shrinker associated with the block manager's bufio client vs cmd root_lock). ++ * - must take shrinker_rwsem without holding cmd->root_lock ++ */ ++ new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, ++ CACHE_MAX_CONCURRENT_LOCKS); + + WRITE_LOCK(cmd); +- __destroy_persistent_data_objects(cmd); +- r = __create_persistent_data_objects(cmd, false); ++ if (cmd->fail_io) { ++ WRITE_UNLOCK(cmd); ++ goto out; ++ } ++ ++ __destroy_persistent_data_objects(cmd, false); ++ old_bm = cmd->bm; ++ if (IS_ERR(new_bm)) { ++ DMERR("could not create block manager during abort"); ++ cmd->bm = NULL; ++ r = PTR_ERR(new_bm); ++ goto out_unlock; ++ } ++ ++ cmd->bm = new_bm; ++ r = __open_or_format_metadata(cmd, false); ++ if (r) { ++ cmd->bm = NULL; ++ goto out_unlock; ++ } ++ new_bm = NULL; ++out_unlock: + if (r) + cmd->fail_io = true; + WRITE_UNLOCK(cmd); ++ dm_block_manager_destroy(old_bm); ++out: ++ if (new_bm && !IS_ERR(new_bm)) ++ dm_block_manager_destroy(new_bm); + + return r; + } +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c +index 54a8d5c9a44ea..5e92fac90b675 100644 +--- a/drivers/md/dm-cache-target.c ++++ b/drivers/md/dm-cache-target.c +@@ -907,16 +907,16 @@ static void abort_transaction(struct cache *cache) + if (get_cache_mode(cache) >= CM_READ_ONLY) + return; + +- if (dm_cache_metadata_set_needs_check(cache->cmd)) { +- DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); +- set_cache_mode(cache, CM_FAIL); +- } +- + DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); + if (dm_cache_metadata_abort(cache->cmd)) { + DMERR("%s: failed to abort metadata transaction", dev_name); + set_cache_mode(cache, CM_FAIL); + } ++ ++ if (dm_cache_metadata_set_needs_check(cache->cmd)) { ++ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); ++ set_cache_mode(cache, CM_FAIL); ++ } + } + + static void metadata_operation_failed(struct cache *cache, const char *op, int r) +@@ -1887,6 +1887,7 @@ static void destroy(struct cache *cache) + if (cache->prison) + dm_bio_prison_destroy_v2(cache->prison); + ++ cancel_delayed_work_sync(&cache->waker); + if (cache->wq) + destroy_workqueue(cache->wq); + +diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c +index 2f1cc66d26412..29e0b85eeaf09 100644 +--- a/drivers/md/dm-clone-target.c ++++ b/drivers/md/dm-clone-target.c +@@ -1958,6 +1958,7 @@ static void clone_dtr(struct dm_target *ti) + + mempool_exit(&clone->hydration_pool); + dm_kcopyd_client_destroy(clone->kcopyd_client); ++ cancel_delayed_work_sync(&clone->waker); + destroy_workqueue(clone->wq); + hash_table_exit(clone); + dm_clone_metadata_close(clone->cmd); +diff --git a/drivers/md/dm-integrity.c 
b/drivers/md/dm-integrity.c +index e97e9f97456d4..1388ee35571e0 100644 +--- a/drivers/md/dm-integrity.c ++++ b/drivers/md/dm-integrity.c +@@ -4558,6 +4558,8 @@ static void dm_integrity_dtr(struct dm_target *ti) + BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); + BUG_ON(!list_empty(&ic->wait_list)); + ++ if (ic->mode == 'B') ++ cancel_delayed_work_sync(&ic->bitmap_flush_work); + if (ic->metadata_wq) + destroy_workqueue(ic->metadata_wq); + if (ic->wait_wq) +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c +index a27395c8621ff..6bcc4c4786d89 100644 +--- a/drivers/md/dm-thin-metadata.c ++++ b/drivers/md/dm-thin-metadata.c +@@ -724,6 +724,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) + goto bad_cleanup_data_sm; + } + ++ /* ++ * For pool metadata opening process, root setting is redundant ++ * because it will be set again in __begin_transaction(). But dm ++ * pool aborting process really needs to get last transaction's ++ * root to avoid accessing broken btree. ++ */ ++ pmd->root = le64_to_cpu(disk_super->data_mapping_root); ++ pmd->details_root = le64_to_cpu(disk_super->device_details_root); ++ + __setup_btree_details(pmd); + dm_bm_unlock(sblock); + +@@ -776,13 +785,15 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f + return r; + } + +-static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) ++static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd, ++ bool destroy_bm) + { + dm_sm_destroy(pmd->data_sm); + dm_sm_destroy(pmd->metadata_sm); + dm_tm_destroy(pmd->nb_tm); + dm_tm_destroy(pmd->tm); +- dm_block_manager_destroy(pmd->bm); ++ if (destroy_bm) ++ dm_block_manager_destroy(pmd->bm); + } + + static int __begin_transaction(struct dm_pool_metadata *pmd) +@@ -989,7 +1000,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) + } + pmd_write_unlock(pmd); + if (!pmd->fail_io) +- __destroy_persistent_data_objects(pmd); ++ __destroy_persistent_data_objects(pmd, true); + + kfree(pmd); + return 0; +@@ -1860,19 +1871,52 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) + int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) + { + int r = -EINVAL; ++ struct dm_block_manager *old_bm = NULL, *new_bm = NULL; ++ ++ /* fail_io is double-checked with pmd->root_lock held below */ ++ if (unlikely(pmd->fail_io)) ++ return r; ++ ++ /* ++ * Replacement block manager (new_bm) is created and old_bm destroyed outside of ++ * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of ++ * shrinker associated with the block manager's bufio client vs pmd root_lock). 
++ * - must take shrinker_rwsem without holding pmd->root_lock ++ */ ++ new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, ++ THIN_MAX_CONCURRENT_LOCKS); + + pmd_write_lock(pmd); +- if (pmd->fail_io) ++ if (pmd->fail_io) { ++ pmd_write_unlock(pmd); + goto out; ++ } + + __set_abort_with_changes_flags(pmd); +- __destroy_persistent_data_objects(pmd); +- r = __create_persistent_data_objects(pmd, false); ++ __destroy_persistent_data_objects(pmd, false); ++ old_bm = pmd->bm; ++ if (IS_ERR(new_bm)) { ++ DMERR("could not create block manager during abort"); ++ pmd->bm = NULL; ++ r = PTR_ERR(new_bm); ++ goto out_unlock; ++ } ++ ++ pmd->bm = new_bm; ++ r = __open_or_format_metadata(pmd, false); ++ if (r) { ++ pmd->bm = NULL; ++ goto out_unlock; ++ } ++ new_bm = NULL; ++out_unlock: + if (r) + pmd->fail_io = true; +- +-out: + pmd_write_unlock(pmd); ++ dm_block_manager_destroy(old_bm); ++out: ++ if (new_bm && !IS_ERR(new_bm)) ++ dm_block_manager_destroy(new_bm); + + return r; + } +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index e76c96c760a9b..196f82559ad6b 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -2889,6 +2889,8 @@ static void __pool_destroy(struct pool *pool) + dm_bio_prison_destroy(pool->prison); + dm_kcopyd_client_destroy(pool->copier); + ++ cancel_delayed_work_sync(&pool->waker); ++ cancel_delayed_work_sync(&pool->no_space_timeout); + if (pool->wq) + destroy_workqueue(pool->wq); + +@@ -3540,20 +3542,28 @@ static int pool_preresume(struct dm_target *ti) + */ + r = bind_control_target(pool, ti); + if (r) +- return r; ++ goto out; + + r = maybe_resize_data_dev(ti, &need_commit1); + if (r) +- return r; ++ goto out; + + r = maybe_resize_metadata_dev(ti, &need_commit2); + if (r) +- return r; ++ goto out; + + if (need_commit1 || need_commit2) + (void) commit(pool); ++out: ++ /* ++ * When a thin-pool is PM_FAIL, it cannot be rebuilt if ++ * bio is in deferred list. Therefore need to return 0 ++ * to allow pool_resume() to flush IO. 
++ */ ++ if (r && get_pool_mode(pool) == PM_FAIL) ++ r = 0; + +- return 0; ++ return r; + } + + static void pool_suspend_active_thins(struct pool *pool) +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c +index 63ece30114e53..e7cc6ba1b657f 100644 +--- a/drivers/md/md-bitmap.c ++++ b/drivers/md/md-bitmap.c +@@ -486,7 +486,7 @@ void md_bitmap_print_sb(struct bitmap *bitmap) + sb = kmap_atomic(bitmap->storage.sb_page); + pr_debug("%s: bitmap file superblock:\n", bmname(bitmap)); + pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); +- pr_debug(" version: %d\n", le32_to_cpu(sb->version)); ++ pr_debug(" version: %u\n", le32_to_cpu(sb->version)); + pr_debug(" uuid: %08x.%08x.%08x.%08x\n", + le32_to_cpu(*(__le32 *)(sb->uuid+0)), + le32_to_cpu(*(__le32 *)(sb->uuid+4)), +@@ -497,11 +497,11 @@ void md_bitmap_print_sb(struct bitmap *bitmap) + pr_debug("events cleared: %llu\n", + (unsigned long long) le64_to_cpu(sb->events_cleared)); + pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); +- pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize)); +- pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep)); ++ pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize)); ++ pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep)); + pr_debug(" sync size: %llu KB\n", + (unsigned long long)le64_to_cpu(sb->sync_size)/2); +- pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind)); ++ pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind)); + kunmap_atomic(sb); + } + +@@ -2105,7 +2105,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, + bytes = DIV_ROUND_UP(chunks, 8); + if (!bitmap->mddev->bitmap_info.external) + bytes += sizeof(bitmap_super_t); +- } while (bytes > (space << 9)); ++ } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) < ++ (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1)); + } else + chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; + +@@ -2150,7 +2151,7 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, + bitmap->counts.missing_pages = pages; + bitmap->counts.chunkshift = chunkshift; + bitmap->counts.chunks = chunks; +- bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift + ++ bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift + + BITMAP_BLOCK_SHIFT); + + blocks = min(old_counts.chunks << old_counts.chunkshift, +@@ -2176,8 +2177,8 @@ int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, + bitmap->counts.missing_pages = old_counts.pages; + bitmap->counts.chunkshift = old_counts.chunkshift; + bitmap->counts.chunks = old_counts.chunks; +- bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift + +- BITMAP_BLOCK_SHIFT); ++ bitmap->mddev->bitmap_info.chunksize = ++ 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT); + blocks = old_counts.chunks << old_counts.chunkshift; + pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); + break; +@@ -2537,6 +2538,9 @@ chunksize_store(struct mddev *mddev, const char *buf, size_t len) + if (csize < 512 || + !is_power_of_2(csize)) + return -EINVAL; ++ if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE * ++ sizeof(((bitmap_super_t *)0)->chunksize)))) ++ return -EOVERFLOW; + mddev->bitmap_info.chunksize = csize; + return len; + } +diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c +index f6ee678107d37..9ce5f010de3f8 100644 +--- a/drivers/media/dvb-core/dmxdev.c ++++ b/drivers/media/dvb-core/dmxdev.c +@@ -790,6 +790,11 @@ static int dvb_demux_open(struct inode *inode, struct file 
*file) + if (mutex_lock_interruptible(&dmxdev->mutex)) + return -ERESTARTSYS; + ++ if (dmxdev->exit) { ++ mutex_unlock(&dmxdev->mutex); ++ return -ENODEV; ++ } ++ + for (i = 0; i < dmxdev->filternum; i++) + if (dmxdev->filter[i].state == DMXDEV_STATE_FREE) + break; +@@ -1448,7 +1453,10 @@ EXPORT_SYMBOL(dvb_dmxdev_init); + + void dvb_dmxdev_release(struct dmxdev *dmxdev) + { ++ mutex_lock(&dmxdev->mutex); + dmxdev->exit = 1; ++ mutex_unlock(&dmxdev->mutex); ++ + if (dmxdev->dvbdev->users > 1) { + wait_event(dmxdev->dvbdev->wait_queue, + dmxdev->dvbdev->users == 1); +diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c +index 9934728734af9..a31d52cb6d62c 100644 +--- a/drivers/media/dvb-core/dvbdev.c ++++ b/drivers/media/dvb-core/dvbdev.c +@@ -335,6 +335,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev, + GFP_KERNEL); + if (!dvbdev->pads) { + kfree(dvbdev->entity); ++ dvbdev->entity = NULL; + return -ENOMEM; + } + } +diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c +index 3d54a0ec86afd..3ae1f3a2f1420 100644 +--- a/drivers/media/dvb-frontends/stv0288.c ++++ b/drivers/media/dvb-frontends/stv0288.c +@@ -440,9 +440,8 @@ static int stv0288_set_frontend(struct dvb_frontend *fe) + struct stv0288_state *state = fe->demodulator_priv; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + +- char tm; +- unsigned char tda[3]; +- u8 reg, time_out = 0; ++ u8 tda[3], reg, time_out = 0; ++ s8 tm; + + dprintk("%s : FE_SET_FRONTEND\n", __func__); + +diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c +index 72d70984e99a6..6d3c92045c05f 100644 +--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c ++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_ctrl.c +@@ -468,8 +468,10 @@ void s5p_mfc_close_mfc_inst(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx) + s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); + /* Wait until instance is returned or timeout occurred */ + if (s5p_mfc_wait_for_done_ctx(ctx, +- S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) ++ S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)){ ++ clear_work_bit_irqsave(ctx); + mfc_err("Err returning instance\n"); ++ } + + /* Free resources */ + s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx); +diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c +index b65e506665af7..f62703cebb77c 100644 +--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c ++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c +@@ -1218,6 +1218,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) + unsigned long mb_y_addr, mb_c_addr; + int slice_type; + unsigned int strm_size; ++ bool src_ready; + + slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); + strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); +@@ -1257,7 +1258,8 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) + } + } + } +- if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) { ++ if (ctx->src_queue_cnt > 0 && (ctx->state == MFCINST_RUNNING || ++ ctx->state == MFCINST_FINISHING)) { + mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, + list); + if (mb_entry->flags & MFC_BUF_FLAG_USED) { +@@ -1288,7 +1290,13 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) + vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size); + vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); + } +- if 
((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) ++ ++ src_ready = true; ++ if (ctx->state == MFCINST_RUNNING && ctx->src_queue_cnt == 0) ++ src_ready = false; ++ if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0) ++ src_ready = false; ++ if (!src_ready || ctx->dst_queue_cnt == 0) + clear_work_bit(ctx); + + return 0; +diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c +index 8227004f67469..c0df5ac9fcff2 100644 +--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c ++++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c +@@ -1060,7 +1060,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) + } + + /* aspect ratio VUI */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x1 << 5); + reg |= ((p_h264->vui_sar & 0x1) << 5); + writel(reg, mfc_regs->e_h264_options); +@@ -1083,7 +1083,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) + + /* intra picture period for H.264 open GOP */ + /* control */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x1 << 4); + reg |= ((p_h264->open_gop & 0x1) << 4); + writel(reg, mfc_regs->e_h264_options); +@@ -1097,23 +1097,23 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) + } + + /* 'WEIGHTED_BI_PREDICTION' for B is disable */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x3 << 9); + writel(reg, mfc_regs->e_h264_options); + + /* 'CONSTRAINED_INTRA_PRED_ENABLE' is disable */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x1 << 14); + writel(reg, mfc_regs->e_h264_options); + + /* ASO */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x1 << 6); + reg |= ((p_h264->aso & 0x1) << 6); + writel(reg, mfc_regs->e_h264_options); + + /* hier qp enable */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x1 << 8); + reg |= ((p_h264->open_gop & 0x1) << 8); + writel(reg, mfc_regs->e_h264_options); +@@ -1134,7 +1134,7 @@ static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) + writel(reg, mfc_regs->e_h264_num_t_layer); + + /* frame packing SEI generation */ +- readl(mfc_regs->e_h264_options); ++ reg = readl(mfc_regs->e_h264_options); + reg &= ~(0x1 << 25); + reg |= ((p_h264->sei_frame_packing & 0x1) << 25); + writel(reg, mfc_regs->e_h264_options); +diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c +index bec3f9e3cd3fa..525f979e2a974 100644 +--- a/drivers/mmc/host/sdhci-sprd.c ++++ b/drivers/mmc/host/sdhci-sprd.c +@@ -228,13 +228,15 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, + div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); + sdhci_enable_clk(host, div); + +- /* enable auto gate sdhc_enable_auto_gate */ +- val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); +- mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | +- SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; +- if (mask != (val & mask)) { +- val |= mask; +- sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); ++ /* Enable CLK_AUTO when the clock is greater than 400K. 
*/ ++ if (clk > 400000) { ++ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); ++ mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | ++ SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; ++ if (mask != (val & mask)) { ++ val |= mask; ++ sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); ++ } + } + } + +diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c +index 0cf1a1797ea32..2e0655c0b606f 100644 +--- a/drivers/mtd/spi-nor/core.c ++++ b/drivers/mtd/spi-nor/core.c +@@ -1184,6 +1184,8 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, + continue; + + erase = &map->erase_type[i]; ++ if (!erase->size) ++ continue; + + /* Alignment is not mandatory for overlaid regions */ + if (region->offset & SNOR_OVERLAID_REGION && +diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c +index 119b38e6fc2a3..d57ddaf1525b3 100644 +--- a/drivers/mtd/spi-nor/gigadevice.c ++++ b/drivers/mtd/spi-nor/gigadevice.c +@@ -8,19 +8,29 @@ + + #include "core.h" + +-static void gd25q256_default_init(struct spi_nor *nor) ++static int ++gd25q256_post_bfpt(struct spi_nor *nor, ++ const struct sfdp_parameter_header *bfpt_header, ++ const struct sfdp_bfpt *bfpt) + { + /* +- * Some manufacturer like GigaDevice may use different +- * bit to set QE on different memories, so the MFR can't +- * indicate the quad_enable method for this case, we need +- * to set it in the default_init fixup hook. ++ * GD25Q256C supports the first version of JESD216 which does not define ++ * the Quad Enable methods. Overwrite the default Quad Enable method. ++ * ++ * GD25Q256 GENERATION | SFDP MAJOR VERSION | SFDP MINOR VERSION ++ * GD25Q256C | SFDP_JESD216_MAJOR | SFDP_JESD216_MINOR ++ * GD25Q256D | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR ++ * GD25Q256E | SFDP_JESD216_MAJOR | SFDP_JESD216B_MINOR + */ +- nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; ++ if (bfpt_header->major == SFDP_JESD216_MAJOR && ++ bfpt_header->minor == SFDP_JESD216_MINOR) ++ nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable; ++ ++ return 0; + } + + static const struct spi_nor_fixups gd25q256_fixups = { +- .default_init = gd25q256_default_init, ++ .post_bfpt = gd25q256_post_bfpt, + }; + + static const struct flash_info gigadevice_nor_parts[] = { +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index 33f723a9f471b..b4e0fc7f65bdf 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -2903,12 +2903,12 @@ static int ravb_remove(struct platform_device *pdev) + priv->desc_bat_dma); + /* Set reset mode */ + ravb_write(ndev, CCC_OPC_RESET, CCC); +- pm_runtime_put_sync(&pdev->dev); + unregister_netdev(ndev); + if (info->nc_queues) + netif_napi_del(&priv->napi[RAVB_NC]); + netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); ++ pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + free_netdev(ndev); +diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c +index 7390f94cd4ca2..a05bda7b9a3ba 100644 +--- a/drivers/net/wireless/microchip/wilc1000/sdio.c ++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c +@@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, + { }, + }; ++MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids); + + #define WILC_SDIO_BLOCK_SIZE 512 + +diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c +index 
e6c01db393f95..f26d2ba8a3715 100644 +--- a/drivers/of/kexec.c ++++ b/drivers/of/kexec.c +@@ -281,7 +281,7 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, + const char *cmdline, size_t extra_fdt_size) + { + void *fdt; +- int ret, chosen_node; ++ int ret, chosen_node, len; + const void *prop; + size_t fdt_size; + +@@ -324,19 +324,19 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, + goto out; + + /* Did we boot using an initrd? */ +- prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL); ++ prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", &len); + if (prop) { + u64 tmp_start, tmp_end, tmp_size; + +- tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop)); ++ tmp_start = of_read_number(prop, len / 4); + +- prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL); ++ prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", &len); + if (!prop) { + ret = -EINVAL; + goto out; + } + +- tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop)); ++ tmp_end = of_read_number(prop, len / 4); + + /* + * kexec reserves exact initrd size, while firmware may +diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c +index d4be9d2ee74d9..8bdc5e043831c 100644 +--- a/drivers/parisc/led.c ++++ b/drivers/parisc/led.c +@@ -137,6 +137,9 @@ static int start_task(void) + + /* Create the work queue and queue the LED task */ + led_wq = create_singlethread_workqueue("led_wq"); ++ if (!led_wq) ++ return -ENOMEM; ++ + queue_delayed_work(led_wq, &led_task, 0); + + return 0; +diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c +index e402f05068a53..66d9ab2886468 100644 +--- a/drivers/pci/doe.c ++++ b/drivers/pci/doe.c +@@ -29,6 +29,9 @@ + #define PCI_DOE_FLAG_CANCEL 0 + #define PCI_DOE_FLAG_DEAD 1 + ++/* Max data object length is 2^18 dwords */ ++#define PCI_DOE_MAX_LENGTH (1 << 18) ++ + /** + * struct pci_doe_mb - State for a single DOE mailbox + * +@@ -107,6 +110,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, + { + struct pci_dev *pdev = doe_mb->pdev; + int offset = doe_mb->cap_offset; ++ size_t length; + u32 val; + int i; + +@@ -123,15 +127,20 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, + if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) + return -EIO; + ++ /* Length is 2 DW of header + length of payload in DW */ ++ length = 2 + task->request_pl_sz / sizeof(u32); ++ if (length > PCI_DOE_MAX_LENGTH) ++ return -EIO; ++ if (length == PCI_DOE_MAX_LENGTH) ++ length = 0; ++ + /* Write DOE Header */ + val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) | + FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type); + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val); +- /* Length is 2 DW of header + length of payload in DW */ + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, + FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, +- 2 + task->request_pl_sz / +- sizeof(u32))); ++ length)); + for (i = 0; i < task->request_pl_sz / sizeof(u32); i++) + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, + task->request_pl[i]); +@@ -178,7 +187,10 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas + pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + + length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val); +- if (length > SZ_1M || length < 2) ++ /* A value of 0x0 indicates max data object length */ ++ if (!length) ++ length = PCI_DOE_MAX_LENGTH; ++ if (length < 2) + return -EIO; + + /* First 2 dwords have already been read */ +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c 
+index 0a2eeb82cebde..ba38fc47d35e9 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -1175,11 +1175,9 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) + + sysfs_bin_attr_init(res_attr); + if (write_combine) { +- pdev->res_attr_wc[num] = res_attr; + sprintf(res_attr_name, "resource%d_wc", num); + res_attr->mmap = pci_mmap_resource_wc; + } else { +- pdev->res_attr[num] = res_attr; + sprintf(res_attr_name, "resource%d", num); + if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { + res_attr->read = pci_read_resource_io; +@@ -1197,10 +1195,17 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) + res_attr->size = pci_resource_len(pdev, num); + res_attr->private = (void *)(unsigned long)num; + retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); +- if (retval) ++ if (retval) { + kfree(res_attr); ++ return retval; ++ } ++ ++ if (write_combine) ++ pdev->res_attr_wc[num] = res_attr; ++ else ++ pdev->res_attr[num] = res_attr; + +- return retval; ++ return 0; + } + + /** +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 2127aba3550b5..ab615ab4e4409 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -6447,6 +6447,8 @@ bool pci_device_is_present(struct pci_dev *pdev) + { + u32 v; + ++ /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ ++ pdev = pci_physfn(pdev); + if (pci_dev_is_disconnected(pdev)) + return false; + return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); +diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +index ba9d761ec49a7..91f8ee79000df 100644 +--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c ++++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +@@ -1121,9 +1121,46 @@ static const struct qmp_phy_cfg sdm845_usb3phy_cfg = { + .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX, + }; + ++static const struct qmp_phy_cfg sdm845_dpphy_cfg = { ++ .type = PHY_TYPE_DP, ++ .lanes = 2, ++ ++ .serdes_tbl = qmp_v3_dp_serdes_tbl, ++ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl), ++ .tx_tbl = qmp_v3_dp_tx_tbl, ++ .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl), ++ ++ .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr, ++ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr), ++ .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr, ++ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr), ++ .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2, ++ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2), ++ .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3, ++ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3), ++ ++ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr, ++ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr, ++ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2, ++ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2, ++ ++ .clk_list = qmp_v3_phy_clk_l, ++ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), ++ .reset_list = msm8996_usb3phy_reset_l, ++ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), ++ .vreg_list = qmp_phy_vreg_l, ++ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), ++ .regs = qmp_v3_usb3phy_regs_layout, ++ ++ .dp_aux_init = qcom_qmp_v3_phy_dp_aux_init, ++ .configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx, ++ .configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy, ++ .calibrate_dp_phy = qcom_qmp_v3_dp_phy_calibrate, ++}; ++ + static const struct qmp_phy_combo_cfg sdm845_usb3dpphy_cfg = { + .usb_cfg = &sdm845_usb3phy_cfg, +- .dp_cfg = &sc7180_dpphy_cfg, ++ .dp_cfg = &sdm845_dpphy_cfg, + }; + + static const 
struct qmp_phy_cfg sm8150_usb3phy_cfg = { +@@ -1184,8 +1221,8 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = { + + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), +- .reset_list = sc7180_usb3phy_reset_l, +- .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), ++ .reset_list = msm8996_usb3phy_reset_l, ++ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, +@@ -1328,8 +1365,8 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = { + .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2, + .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2, + +- .clk_list = qmp_v4_phy_clk_l, +- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l), ++ .clk_list = qmp_v4_sm8250_usbphy_clk_l, ++ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c +index 3ea8fc6a9ca36..fc3d47a759443 100644 +--- a/drivers/platform/x86/ideapad-laptop.c ++++ b/drivers/platform/x86/ideapad-laptop.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + #include + +@@ -37,20 +38,23 @@ + + #define IDEAPAD_RFKILL_DEV_NUM 3 + +-#if IS_ENABLED(CONFIG_ACPI_WMI) +-static const char *const ideapad_wmi_fnesc_events[] = { +- "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */ +- "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */ +- "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", /* Legion 5 */ +-}; +-#endif +- + enum { + CFG_CAP_BT_BIT = 16, + CFG_CAP_3G_BIT = 17, + CFG_CAP_WIFI_BIT = 18, + CFG_CAP_CAM_BIT = 19, +- CFG_CAP_TOUCHPAD_BIT = 30, ++ ++ /* ++ * These are OnScreenDisplay support bits that can be useful to determine ++ * whether a hotkey exists/should show OSD. But they aren't particularly ++ * meaningful since they were introduced later, i.e. 2010 IdeaPads ++ * don't have these, but they still have had OSD for hotkeys. ++ */ ++ CFG_OSD_NUMLK_BIT = 27, ++ CFG_OSD_CAPSLK_BIT = 28, ++ CFG_OSD_MICMUTE_BIT = 29, ++ CFG_OSD_TOUCHPAD_BIT = 30, ++ CFG_OSD_CAM_BIT = 31, + }; + + enum { +@@ -130,7 +134,7 @@ struct ideapad_private { + struct ideapad_dytc_priv *dytc; + struct dentry *debug; + unsigned long cfg; +- const char *fnesc_guid; ++ unsigned long r_touchpad_val; + struct { + bool conservation_mode : 1; + bool dytc : 1; +@@ -140,6 +144,7 @@ struct ideapad_private { + bool hw_rfkill_switch : 1; + bool kbd_bl : 1; + bool touchpad_ctrl_via_ec : 1; ++ bool ctrl_ps2_aux_port : 1; + bool usb_charging : 1; + } features; + struct { +@@ -171,6 +176,48 @@ MODULE_PARM_DESC(set_fn_lock_led, + "Enable driver based updates of the fn-lock LED on fn-lock changes. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); + ++static bool ctrl_ps2_aux_port; ++module_param(ctrl_ps2_aux_port, bool, 0444); ++MODULE_PARM_DESC(ctrl_ps2_aux_port, ++ "Enable driver based PS/2 aux port en-/dis-abling on touchpad on/off toggle. 
" ++ "If you need this please report this to: platform-driver-x86@vger.kernel.org"); ++ ++/* ++ * shared data ++ */ ++ ++static struct ideapad_private *ideapad_shared; ++static DEFINE_MUTEX(ideapad_shared_mutex); ++ ++static int ideapad_shared_init(struct ideapad_private *priv) ++{ ++ int ret; ++ ++ mutex_lock(&ideapad_shared_mutex); ++ ++ if (!ideapad_shared) { ++ ideapad_shared = priv; ++ ret = 0; ++ } else { ++ dev_warn(&priv->adev->dev, "found multiple platform devices\n"); ++ ret = -EINVAL; ++ } ++ ++ mutex_unlock(&ideapad_shared_mutex); ++ ++ return ret; ++} ++ ++static void ideapad_shared_exit(struct ideapad_private *priv) ++{ ++ mutex_lock(&ideapad_shared_mutex); ++ ++ if (ideapad_shared == priv) ++ ideapad_shared = NULL; ++ ++ mutex_unlock(&ideapad_shared_mutex); ++} ++ + /* + * ACPI Helpers + */ +@@ -386,8 +433,19 @@ static int debugfs_cfg_show(struct seq_file *s, void *data) + seq_puts(s, " wifi"); + if (test_bit(CFG_CAP_CAM_BIT, &priv->cfg)) + seq_puts(s, " camera"); +- if (test_bit(CFG_CAP_TOUCHPAD_BIT, &priv->cfg)) ++ seq_puts(s, "\n"); ++ ++ seq_puts(s, "OSD support:"); ++ if (test_bit(CFG_OSD_NUMLK_BIT, &priv->cfg)) ++ seq_puts(s, " num-lock"); ++ if (test_bit(CFG_OSD_CAPSLK_BIT, &priv->cfg)) ++ seq_puts(s, " caps-lock"); ++ if (test_bit(CFG_OSD_MICMUTE_BIT, &priv->cfg)) ++ seq_puts(s, " mic-mute"); ++ if (test_bit(CFG_OSD_TOUCHPAD_BIT, &priv->cfg)) + seq_puts(s, " touchpad"); ++ if (test_bit(CFG_OSD_CAM_BIT, &priv->cfg)) ++ seq_puts(s, " camera"); + seq_puts(s, "\n"); + + seq_puts(s, "Graphics: "); +@@ -593,6 +651,8 @@ static ssize_t touchpad_show(struct device *dev, + if (err) + return err; + ++ priv->r_touchpad_val = result; ++ + return sysfs_emit(buf, "%d\n", !!result); + } + +@@ -612,6 +672,8 @@ static ssize_t touchpad_store(struct device *dev, + if (err) + return err; + ++ priv->r_touchpad_val = state; ++ + return count; + } + +@@ -680,8 +742,7 @@ static umode_t ideapad_is_visible(struct kobject *kobj, + else if (attr == &dev_attr_fn_lock.attr) + supported = priv->features.fn_lock; + else if (attr == &dev_attr_touchpad.attr) +- supported = priv->features.touchpad_ctrl_via_ec && +- test_bit(CFG_CAP_TOUCHPAD_BIT, &priv->cfg); ++ supported = priv->features.touchpad_ctrl_via_ec; + else if (attr == &dev_attr_usb_charging.attr) + supported = priv->features.usb_charging; + +@@ -1089,6 +1150,8 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv) + /* + * input device + */ ++#define IDEAPAD_WMI_KEY 0x100 ++ + static const struct key_entry ideapad_keymap[] = { + { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, + { KE_KEY, 7, { KEY_CAMERA } }, +@@ -1101,7 +1164,30 @@ static const struct key_entry ideapad_keymap[] = { + { KE_KEY, 65, { KEY_PROG4 } }, + { KE_KEY, 66, { KEY_TOUCHPAD_OFF } }, + { KE_KEY, 67, { KEY_TOUCHPAD_ON } }, ++ { KE_KEY, 68, { KEY_TOUCHPAD_TOGGLE } }, + { KE_KEY, 128, { KEY_ESC } }, ++ ++ /* ++ * WMI keys ++ */ ++ ++ /* FnLock (handled by the firmware) */ ++ { KE_IGNORE, 0x02 | IDEAPAD_WMI_KEY }, ++ /* Esc (handled by the firmware) */ ++ { KE_IGNORE, 0x03 | IDEAPAD_WMI_KEY }, ++ /* Customizable Lenovo Hotkey ("star" with 'S' inside) */ ++ { KE_KEY, 0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } }, ++ /* Dark mode toggle */ ++ { KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, ++ /* Sound profile switch */ ++ { KE_KEY, 0x12 | IDEAPAD_WMI_KEY, { KEY_PROG2 } }, ++ /* Lenovo Virtual Background application */ ++ { KE_KEY, 0x28 | IDEAPAD_WMI_KEY, { KEY_PROG3 } }, ++ /* Lenovo Support */ ++ { KE_KEY, 0x27 | IDEAPAD_WMI_KEY, { KEY_HELP } }, ++ /* Refresh Rate Toggle */ 
++ { KE_KEY, 0x0a | IDEAPAD_WMI_KEY, { KEY_DISPLAYTOGGLE } }, ++ + { KE_END }, + }; + +@@ -1414,26 +1500,41 @@ static void ideapad_kbd_bl_exit(struct ideapad_private *priv) + /* + * module init/exit + */ +-static void ideapad_sync_touchpad_state(struct ideapad_private *priv) ++static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_events) + { + unsigned long value; ++ unsigned char param; ++ int ret; + +- if (!priv->features.touchpad_ctrl_via_ec) ++ /* Without reading from EC touchpad LED doesn't switch state */ ++ ret = read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value); ++ if (ret) + return; + +- /* Without reading from EC touchpad LED doesn't switch state */ +- if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { +- unsigned char param; ++ /* ++ * Some IdeaPads don't really turn off touchpad - they only ++ * switch the LED state. We (de)activate KBC AUX port to turn ++ * touchpad off and on. We send KEY_TOUCHPAD_OFF and ++ * KEY_TOUCHPAD_ON to not to get out of sync with LED ++ */ ++ if (priv->features.ctrl_ps2_aux_port) ++ i8042_command(¶m, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE); ++ ++ if (send_events) { + /* +- * Some IdeaPads don't really turn off touchpad - they only +- * switch the LED state. We (de)activate KBC AUX port to turn +- * touchpad off and on. We send KEY_TOUCHPAD_OFF and +- * KEY_TOUCHPAD_ON to not to get out of sync with LED ++ * On older models the EC controls the touchpad and toggles it ++ * on/off itself, in this case we report KEY_TOUCHPAD_ON/_OFF. ++ * If the EC did not toggle, report KEY_TOUCHPAD_TOGGLE. + */ +- i8042_command(¶m, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE); +- ideapad_input_report(priv, value ? 67 : 66); +- sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad"); ++ if (value != priv->r_touchpad_val) { ++ ideapad_input_report(priv, value ? 67 : 66); ++ sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad"); ++ } else { ++ ideapad_input_report(priv, 68); ++ } + } ++ ++ priv->r_touchpad_val = value; + } + + static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) +@@ -1474,7 +1575,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) + ideapad_sync_rfk_state(priv); + break; + case 5: +- ideapad_sync_touchpad_state(priv); ++ ideapad_sync_touchpad_state(priv, true); + break; + case 4: + ideapad_backlight_notify_brightness(priv); +@@ -1505,33 +1606,6 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) + } + } + +-#if IS_ENABLED(CONFIG_ACPI_WMI) +-static void ideapad_wmi_notify(u32 value, void *context) +-{ +- struct ideapad_private *priv = context; +- unsigned long result; +- +- switch (value) { +- case 128: +- ideapad_input_report(priv, value); +- break; +- case 208: +- if (!priv->features.set_fn_lock_led) +- break; +- +- if (!eval_hals(priv->adev->handle, &result)) { +- bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result); +- +- exec_sals(priv->adev->handle, state ? 
SALS_FNLOCK_ON : SALS_FNLOCK_OFF); +- } +- break; +- default: +- dev_info(&priv->platform_device->dev, +- "Unknown WMI event: %u\n", value); +- } +-} +-#endif +- + /* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */ + static const struct dmi_system_id set_fn_lock_led_list[] = { + { +@@ -1563,6 +1637,23 @@ static const struct dmi_system_id hw_rfkill_list[] = { + {} + }; + ++/* ++ * On some models the EC toggles the touchpad muted LED on touchpad toggle ++ * hotkey presses, but the EC does not actually disable the touchpad itself. ++ * On these models the driver needs to explicitly enable/disable the i8042 ++ * (PS/2) aux port. ++ */ ++static const struct dmi_system_id ctrl_ps2_aux_port_list[] = { ++ { ++ /* Lenovo Ideapad Z570 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), ++ }, ++ }, ++ {} ++}; ++ + static const struct dmi_system_id no_touchpad_switch_list[] = { + { + .ident = "Lenovo Yoga 3 Pro 1370", +@@ -1590,6 +1681,8 @@ static void ideapad_check_features(struct ideapad_private *priv) + set_fn_lock_led || dmi_check_system(set_fn_lock_led_list); + priv->features.hw_rfkill_switch = + hw_rfkill_switch || dmi_check_system(hw_rfkill_list); ++ priv->features.ctrl_ps2_aux_port = ++ ctrl_ps2_aux_port || dmi_check_system(ctrl_ps2_aux_port_list); + + /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */ + if (acpi_dev_present("ELAN0634", NULL, -1)) +@@ -1622,6 +1715,118 @@ static void ideapad_check_features(struct ideapad_private *priv) + } + } + ++#if IS_ENABLED(CONFIG_ACPI_WMI) ++/* ++ * WMI driver ++ */ ++enum ideapad_wmi_event_type { ++ IDEAPAD_WMI_EVENT_ESC, ++ IDEAPAD_WMI_EVENT_FN_KEYS, ++}; ++ ++struct ideapad_wmi_private { ++ enum ideapad_wmi_event_type event; ++}; ++ ++static int ideapad_wmi_probe(struct wmi_device *wdev, const void *context) ++{ ++ struct ideapad_wmi_private *wpriv; ++ ++ wpriv = devm_kzalloc(&wdev->dev, sizeof(*wpriv), GFP_KERNEL); ++ if (!wpriv) ++ return -ENOMEM; ++ ++ *wpriv = *(const struct ideapad_wmi_private *)context; ++ ++ dev_set_drvdata(&wdev->dev, wpriv); ++ return 0; ++} ++ ++static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data) ++{ ++ struct ideapad_wmi_private *wpriv = dev_get_drvdata(&wdev->dev); ++ struct ideapad_private *priv; ++ unsigned long result; ++ ++ mutex_lock(&ideapad_shared_mutex); ++ ++ priv = ideapad_shared; ++ if (!priv) ++ goto unlock; ++ ++ switch (wpriv->event) { ++ case IDEAPAD_WMI_EVENT_ESC: ++ ideapad_input_report(priv, 128); ++ break; ++ case IDEAPAD_WMI_EVENT_FN_KEYS: ++ if (priv->features.set_fn_lock_led && ++ !eval_hals(priv->adev->handle, &result)) { ++ bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result); ++ ++ exec_sals(priv->adev->handle, state ? 
SALS_FNLOCK_ON : SALS_FNLOCK_OFF); ++ } ++ ++ if (data->type != ACPI_TYPE_INTEGER) { ++ dev_warn(&wdev->dev, ++ "WMI event data is not an integer\n"); ++ break; ++ } ++ ++ dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n", ++ data->integer.value); ++ ++ ideapad_input_report(priv, ++ data->integer.value | IDEAPAD_WMI_KEY); ++ ++ break; ++ } ++unlock: ++ mutex_unlock(&ideapad_shared_mutex); ++} ++ ++static const struct ideapad_wmi_private ideapad_wmi_context_esc = { ++ .event = IDEAPAD_WMI_EVENT_ESC ++}; ++ ++static const struct ideapad_wmi_private ideapad_wmi_context_fn_keys = { ++ .event = IDEAPAD_WMI_EVENT_FN_KEYS ++}; ++ ++static const struct wmi_device_id ideapad_wmi_ids[] = { ++ { "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", &ideapad_wmi_context_esc }, /* Yoga 3 */ ++ { "56322276-8493-4CE8-A783-98C991274F5E", &ideapad_wmi_context_esc }, /* Yoga 700 */ ++ { "8FC0DE0C-B4E4-43FD-B0F3-8871711C1294", &ideapad_wmi_context_fn_keys }, /* Legion 5 */ ++ {}, ++}; ++MODULE_DEVICE_TABLE(wmi, ideapad_wmi_ids); ++ ++static struct wmi_driver ideapad_wmi_driver = { ++ .driver = { ++ .name = "ideapad_wmi", ++ }, ++ .id_table = ideapad_wmi_ids, ++ .probe = ideapad_wmi_probe, ++ .notify = ideapad_wmi_notify, ++}; ++ ++static int ideapad_wmi_driver_register(void) ++{ ++ return wmi_driver_register(&ideapad_wmi_driver); ++} ++ ++static void ideapad_wmi_driver_unregister(void) ++{ ++ return wmi_driver_unregister(&ideapad_wmi_driver); ++} ++ ++#else ++static inline int ideapad_wmi_driver_register(void) { return 0; } ++static inline void ideapad_wmi_driver_unregister(void) { } ++#endif ++ ++/* ++ * ACPI driver ++ */ + static int ideapad_acpi_add(struct platform_device *pdev) + { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); +@@ -1670,16 +1875,12 @@ static int ideapad_acpi_add(struct platform_device *pdev) + if (!priv->features.hw_rfkill_switch) + write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1); + +- /* The same for Touchpad */ +- if (!priv->features.touchpad_ctrl_via_ec) +- write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1); +- + for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) + if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) + ideapad_register_rfkill(priv, i); + + ideapad_sync_rfk_state(priv); +- ideapad_sync_touchpad_state(priv); ++ ideapad_sync_touchpad_state(priv, false); + + err = ideapad_dytc_profile_init(priv); + if (err) { +@@ -1703,30 +1904,16 @@ static int ideapad_acpi_add(struct platform_device *pdev) + goto notification_failed; + } + +-#if IS_ENABLED(CONFIG_ACPI_WMI) +- for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) { +- status = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i], +- ideapad_wmi_notify, priv); +- if (ACPI_SUCCESS(status)) { +- priv->fnesc_guid = ideapad_wmi_fnesc_events[i]; +- break; +- } +- } +- +- if (ACPI_FAILURE(status) && status != AE_NOT_EXIST) { +- err = -EIO; +- goto notification_failed_wmi; +- } +-#endif ++ err = ideapad_shared_init(priv); ++ if (err) ++ goto shared_init_failed; + + return 0; + +-#if IS_ENABLED(CONFIG_ACPI_WMI) +-notification_failed_wmi: ++shared_init_failed: + acpi_remove_notify_handler(priv->adev->handle, + ACPI_DEVICE_NOTIFY, + ideapad_acpi_notify); +-#endif + + notification_failed: + ideapad_backlight_exit(priv); +@@ -1752,10 +1939,7 @@ static int ideapad_acpi_remove(struct platform_device *pdev) + struct ideapad_private *priv = dev_get_drvdata(&pdev->dev); + int i; + +-#if IS_ENABLED(CONFIG_ACPI_WMI) +- if (priv->fnesc_guid) +- wmi_remove_notify_handler(priv->fnesc_guid); +-#endif ++ ideapad_shared_exit(priv); + + 
acpi_remove_notify_handler(priv->adev->handle, + ACPI_DEVICE_NOTIFY, +@@ -1781,7 +1965,7 @@ static int ideapad_acpi_resume(struct device *dev) + struct ideapad_private *priv = dev_get_drvdata(dev); + + ideapad_sync_rfk_state(priv); +- ideapad_sync_touchpad_state(priv); ++ ideapad_sync_touchpad_state(priv, false); + + if (priv->dytc) + dytc_profile_refresh(priv); +@@ -1807,7 +1991,30 @@ static struct platform_driver ideapad_acpi_driver = { + }, + }; + +-module_platform_driver(ideapad_acpi_driver); ++static int __init ideapad_laptop_init(void) ++{ ++ int err; ++ ++ err = ideapad_wmi_driver_register(); ++ if (err) ++ return err; ++ ++ err = platform_driver_register(&ideapad_acpi_driver); ++ if (err) { ++ ideapad_wmi_driver_unregister(); ++ return err; ++ } ++ ++ return 0; ++} ++module_init(ideapad_laptop_init) ++ ++static void __exit ideapad_laptop_exit(void) ++{ ++ ideapad_wmi_driver_unregister(); ++ platform_driver_unregister(&ideapad_acpi_driver); ++} ++module_exit(ideapad_laptop_exit) + + MODULE_AUTHOR("David Woodhouse "); + MODULE_DESCRIPTION("IdeaPad ACPI Extras"); +diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +index 8f9c571d72578..00ac7e381441a 100644 +--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c ++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +@@ -203,6 +203,7 @@ static const struct x86_cpu_id intel_uncore_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL), ++ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL), + {} + }; + MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids); +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c +index 8476dfef4e626..a1d91736a03b8 100644 +--- a/drivers/platform/x86/thinkpad_acpi.c ++++ b/drivers/platform/x86/thinkpad_acpi.c +@@ -5572,6 +5572,7 @@ static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev) + static struct tpacpi_led_classdev tpacpi_led_thinklight = { + .led_classdev = { + .name = "tpacpi::thinklight", ++ .max_brightness = 1, + .brightness_set_blocking = &light_sysfs_set, + .brightness_get = &light_sysfs_get, + } +diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c +index 4acd6fa8d43b8..123a4618db55f 100644 +--- a/drivers/platform/x86/x86-android-tablets.c ++++ b/drivers/platform/x86/x86-android-tablets.c +@@ -5,7 +5,7 @@ + * devices typically have a bunch of things hardcoded, rather than specified + * in their DSDT. + * +- * Copyright (C) 2021 Hans de Goede ++ * Copyright (C) 2021-2022 Hans de Goede + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +@@ -265,6 +265,56 @@ static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = { + }, + }; + ++/* ++ * Advantech MICA-071 ++ * This is a standard Windows tablet, but it has an extra "quick launch" button ++ * which is not described in the ACPI tables in anyway. ++ * Use the x86-android-tablets infra to create a gpio-button device for this. 
++ */ ++static struct gpio_keys_button advantech_mica_071_button = { ++ .code = KEY_PROG1, ++ /* .gpio gets filled in by advantech_mica_071_init() */ ++ .active_low = true, ++ .desc = "prog1_key", ++ .type = EV_KEY, ++ .wakeup = false, ++ .debounce_interval = 50, ++}; ++ ++static const struct gpio_keys_platform_data advantech_mica_071_button_pdata __initconst = { ++ .buttons = &advantech_mica_071_button, ++ .nbuttons = 1, ++ .name = "prog1_key", ++}; ++ ++static const struct platform_device_info advantech_mica_071_pdevs[] __initconst = { ++ { ++ .name = "gpio-keys", ++ .id = PLATFORM_DEVID_AUTO, ++ .data = &advantech_mica_071_button_pdata, ++ .size_data = sizeof(advantech_mica_071_button_pdata), ++ }, ++}; ++ ++static int __init advantech_mica_071_init(void) ++{ ++ struct gpio_desc *gpiod; ++ int ret; ++ ++ ret = x86_android_tablet_get_gpiod("INT33FC:00", 2, &gpiod); ++ if (ret < 0) ++ return ret; ++ advantech_mica_071_button.gpio = desc_to_gpio(gpiod); ++ ++ return 0; ++} ++ ++static const struct x86_dev_info advantech_mica_071_info __initconst = { ++ .pdev_info = advantech_mica_071_pdevs, ++ .pdev_count = ARRAY_SIZE(advantech_mica_071_pdevs), ++ .init = advantech_mica_071_init, ++}; ++ + /* Asus ME176C and TF103C tablets shared data */ + static struct gpio_keys_button asus_me176c_tf103c_lid = { + .code = SW_LID, +@@ -987,6 +1037,212 @@ static void lenovo_yoga_tab2_830_1050_exit(void) + } + } + ++/* Lenovo Yoga Tab 3 Pro YT3-X90F */ ++ ++/* ++ * There are 2 batteries, with 2 bq27500 fuel-gauges and 2 bq25892 chargers, ++ * "bq25890-charger-1" is instantiated from: drivers/i2c/busses/i2c-cht-wc.c. ++ */ ++static const char * const lenovo_yt3_bq25892_0_suppliers[] = { "cht_wcove_pwrsrc" }; ++static const char * const bq25890_1_psy[] = { "bq25890-charger-1" }; ++ ++static const struct property_entry fg_bq25890_1_supply_props[] = { ++ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq25890_1_psy), ++ { } ++}; ++ ++static const struct software_node fg_bq25890_1_supply_node = { ++ .properties = fg_bq25890_1_supply_props, ++}; ++ ++/* bq25892 charger settings for the flat lipo battery behind the screen */ ++static const struct property_entry lenovo_yt3_bq25892_0_props[] = { ++ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_0_suppliers), ++ PROPERTY_ENTRY_STRING("linux,power-supply-name", "bq25892-second-chrg"), ++ PROPERTY_ENTRY_U32("linux,iinlim-percentage", 40), ++ PROPERTY_ENTRY_BOOL("linux,skip-reset"), ++ /* Values taken from Android Factory Image */ ++ PROPERTY_ENTRY_U32("ti,charge-current", 2048000), ++ PROPERTY_ENTRY_U32("ti,battery-regulation-voltage", 4352000), ++ PROPERTY_ENTRY_U32("ti,termination-current", 128000), ++ PROPERTY_ENTRY_U32("ti,precharge-current", 128000), ++ PROPERTY_ENTRY_U32("ti,minimum-sys-voltage", 3700000), ++ PROPERTY_ENTRY_U32("ti,boost-voltage", 4998000), ++ PROPERTY_ENTRY_U32("ti,boost-max-current", 500000), ++ PROPERTY_ENTRY_BOOL("ti,use-ilim-pin"), ++ { } ++}; ++ ++static const struct software_node lenovo_yt3_bq25892_0_node = { ++ .properties = lenovo_yt3_bq25892_0_props, ++}; ++ ++static const struct x86_i2c_client_info lenovo_yt3_i2c_clients[] __initconst = { ++ { ++ /* bq27500 fuel-gauge for the flat lipo battery behind the screen */ ++ .board_info = { ++ .type = "bq27500", ++ .addr = 0x55, ++ .dev_name = "bq27500_0", ++ .swnode = &fg_bq25890_supply_node, ++ }, ++ .adapter_path = "\\_SB_.PCI0.I2C1", ++ }, { ++ /* bq25892 charger for the flat lipo battery behind the screen */ ++ .board_info = { ++ .type = "bq25892", ++ .addr = 0x6b, ++ 
.dev_name = "bq25892_0", ++ .swnode = &lenovo_yt3_bq25892_0_node, ++ }, ++ .adapter_path = "\\_SB_.PCI0.I2C1", ++ .irq_data = { ++ .type = X86_ACPI_IRQ_TYPE_GPIOINT, ++ .chip = "INT33FF:01", ++ .index = 5, ++ .trigger = ACPI_EDGE_SENSITIVE, ++ .polarity = ACPI_ACTIVE_LOW, ++ }, ++ }, { ++ /* bq27500 fuel-gauge for the round li-ion cells in the hinge */ ++ .board_info = { ++ .type = "bq27500", ++ .addr = 0x55, ++ .dev_name = "bq27500_1", ++ .swnode = &fg_bq25890_1_supply_node, ++ }, ++ .adapter_path = "\\_SB_.PCI0.I2C2", ++ } ++}; ++ ++static int __init lenovo_yt3_init(void) ++{ ++ struct gpio_desc *gpiod; ++ int ret; ++ ++ /* ++ * The "bq25892_0" charger IC has its /CE (Charge-Enable) and OTG pins ++ * connected to GPIOs, rather then having them hardwired to the correct ++ * values as is normally done. ++ * ++ * The bq25890_charger driver controls these through I2C, but this only ++ * works if not overridden by the pins. Set these pins here: ++ * 1. Set /CE to 0 to allow charging. ++ * 2. Set OTG to 0 disable V5 boost output since the 5V boost output of ++ * the main "bq25892_1" charger is used when necessary. ++ */ ++ ++ /* /CE pin */ ++ ret = x86_android_tablet_get_gpiod("INT33FF:02", 22, &gpiod); ++ if (ret < 0) ++ return ret; ++ ++ /* ++ * The gpio_desc returned by x86_android_tablet_get_gpiod() is a "raw" ++ * gpio_desc, that is there is no way to pass lookup-flags like ++ * GPIO_ACTIVE_LOW. Set the GPIO to 0 here to enable charging since ++ * the /CE pin is active-low, but not marked as such in the gpio_desc. ++ */ ++ gpiod_set_value(gpiod, 0); ++ ++ /* OTG pin */ ++ ret = x86_android_tablet_get_gpiod("INT33FF:03", 19, &gpiod); ++ if (ret < 0) ++ return ret; ++ ++ gpiod_set_value(gpiod, 0); ++ ++ return 0; ++} ++ ++static const struct x86_dev_info lenovo_yt3_info __initconst = { ++ .i2c_client_info = lenovo_yt3_i2c_clients, ++ .i2c_client_count = ARRAY_SIZE(lenovo_yt3_i2c_clients), ++ .init = lenovo_yt3_init, ++}; ++ ++/* Medion Lifetab S10346 tablets have an Android factory img with everything hardcoded */ ++static const char * const medion_lifetab_s10346_accel_mount_matrix[] = { ++ "0", "1", "0", ++ "1", "0", "0", ++ "0", "0", "1" ++}; ++ ++static const struct property_entry medion_lifetab_s10346_accel_props[] = { ++ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", medion_lifetab_s10346_accel_mount_matrix), ++ { } ++}; ++ ++static const struct software_node medion_lifetab_s10346_accel_node = { ++ .properties = medion_lifetab_s10346_accel_props, ++}; ++ ++/* Note the LCD panel is mounted upside down, this is correctly indicated in the VBT */ ++static const struct property_entry medion_lifetab_s10346_touchscreen_props[] = { ++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), ++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), ++ { } ++}; ++ ++static const struct software_node medion_lifetab_s10346_touchscreen_node = { ++ .properties = medion_lifetab_s10346_touchscreen_props, ++}; ++ ++static const struct x86_i2c_client_info medion_lifetab_s10346_i2c_clients[] __initconst = { ++ { ++ /* kxtj21009 accel */ ++ .board_info = { ++ .type = "kxtj21009", ++ .addr = 0x0f, ++ .dev_name = "kxtj21009", ++ .swnode = &medion_lifetab_s10346_accel_node, ++ }, ++ .adapter_path = "\\_SB_.I2C3", ++ .irq_data = { ++ .type = X86_ACPI_IRQ_TYPE_GPIOINT, ++ .chip = "INT33FC:02", ++ .index = 23, ++ .trigger = ACPI_EDGE_SENSITIVE, ++ .polarity = ACPI_ACTIVE_HIGH, ++ }, ++ }, { ++ /* goodix touchscreen */ ++ .board_info = { ++ .type = "GDIX1001:00", ++ .addr = 0x14, ++ .dev_name = "goodix_ts", ++ .swnode = 
&medion_lifetab_s10346_touchscreen_node, ++ }, ++ .adapter_path = "\\_SB_.I2C4", ++ .irq_data = { ++ .type = X86_ACPI_IRQ_TYPE_APIC, ++ .index = 0x44, ++ .trigger = ACPI_EDGE_SENSITIVE, ++ .polarity = ACPI_ACTIVE_LOW, ++ }, ++ }, ++}; ++ ++static struct gpiod_lookup_table medion_lifetab_s10346_goodix_gpios = { ++ .dev_id = "i2c-goodix_ts", ++ .table = { ++ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH), ++ GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH), ++ { } ++ }, ++}; ++ ++static struct gpiod_lookup_table * const medion_lifetab_s10346_gpios[] = { ++ &medion_lifetab_s10346_goodix_gpios, ++ NULL ++}; ++ ++static const struct x86_dev_info medion_lifetab_s10346_info __initconst = { ++ .i2c_client_info = medion_lifetab_s10346_i2c_clients, ++ .i2c_client_count = ARRAY_SIZE(medion_lifetab_s10346_i2c_clients), ++ .gpiod_lookup_tables = medion_lifetab_s10346_gpios, ++}; ++ + /* Nextbook Ares 8 tablets have an Android factory img with everything hardcoded */ + static const char * const nextbook_ares8_accel_mount_matrix[] = { + "0", "-1", "0", +@@ -1179,6 +1435,14 @@ static const struct x86_dev_info xiaomi_mipad2_info __initconst = { + }; + + static const struct dmi_system_id x86_android_tablet_ids[] __initconst = { ++ { ++ /* Advantech MICA-071 */ ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"), ++ }, ++ .driver_data = (void *)&advantech_mica_071_info, ++ }, + { + /* Asus MeMO Pad 7 ME176C */ + .matches = { +@@ -1245,6 +1509,25 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = { + }, + .driver_data = (void *)&lenovo_yoga_tab2_830_1050_info, + }, ++ { ++ /* Lenovo Yoga Tab 3 Pro YT3-X90F */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), ++ }, ++ .driver_data = (void *)&lenovo_yt3_info, ++ }, ++ { ++ /* Medion Lifetab S10346 */ ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), ++ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), ++ /* Above strings are much too generic, also match on BIOS date */ ++ DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"), ++ }, ++ .driver_data = (void *)&medion_lifetab_s10346_info, ++ }, + { + /* Nextbook Ares 8 */ + .matches = { +diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c +index 899aa8dd12f07..95da1cbefacf0 100644 +--- a/drivers/remoteproc/imx_dsp_rproc.c ++++ b/drivers/remoteproc/imx_dsp_rproc.c +@@ -347,9 +347,6 @@ static int imx_dsp_rproc_stop(struct rproc *rproc) + struct device *dev = rproc->dev.parent; + int ret = 0; + +- /* Make sure work is finished */ +- flush_work(&priv->rproc_work); +- + if (rproc->state == RPROC_CRASHED) { + priv->flags &= ~REMOTE_IS_READY; + return 0; +@@ -432,9 +429,18 @@ static void imx_dsp_rproc_vq_work(struct work_struct *work) + { + struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc, + rproc_work); ++ struct rproc *rproc = priv->rproc; ++ ++ mutex_lock(&rproc->lock); ++ ++ if (rproc->state != RPROC_RUNNING) ++ goto unlock_mutex; + + rproc_vq_interrupt(priv->rproc, 0); + rproc_vq_interrupt(priv->rproc, 1); ++ ++unlock_mutex: ++ mutex_unlock(&rproc->lock); + } + + /** +diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c +index 7cc4fd207e2d8..596e1440cca56 100644 +--- a/drivers/remoteproc/imx_rproc.c ++++ b/drivers/remoteproc/imx_rproc.c +@@ -113,8 +113,8 @@ static const struct imx_rproc_att imx_rproc_att_imx93[] = { + { 
0x80000000, 0x80000000, 0x10000000, 0 }, + { 0x90000000, 0x80000000, 0x10000000, 0 }, + +- { 0xC0000000, 0xa0000000, 0x10000000, 0 }, +- { 0xD0000000, 0xa0000000, 0x10000000, 0 }, ++ { 0xC0000000, 0xC0000000, 0x10000000, 0 }, ++ { 0xD0000000, 0xC0000000, 0x10000000, 0 }, + }; + + static const struct imx_rproc_att imx_rproc_att_imx8mn[] = { +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c +index cb1d414a23896..c3f194d9384da 100644 +--- a/drivers/remoteproc/remoteproc_core.c ++++ b/drivers/remoteproc/remoteproc_core.c +@@ -1868,12 +1868,18 @@ static void rproc_crash_handler_work(struct work_struct *work) + + mutex_lock(&rproc->lock); + +- if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) { ++ if (rproc->state == RPROC_CRASHED) { + /* handle only the first crash detected */ + mutex_unlock(&rproc->lock); + return; + } + ++ if (rproc->state == RPROC_OFFLINE) { ++ /* Don't recover if the remote processor was stopped */ ++ mutex_unlock(&rproc->lock); ++ goto out; ++ } ++ + rproc->state = RPROC_CRASHED; + dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt, + rproc->name); +@@ -1883,6 +1889,7 @@ static void rproc_crash_handler_work(struct work_struct *work) + if (!rproc->recovery_disabled) + rproc_trigger_recovery(rproc); + ++out: + pm_relax(rproc->dev.parent); + } + +diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c +index 157bf5209ac40..a40c1a52df659 100644 +--- a/drivers/rtc/rtc-ds1347.c ++++ b/drivers/rtc/rtc-ds1347.c +@@ -112,7 +112,7 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt) + return err; + + century = (dt->tm_year / 100) + 19; +- err = regmap_write(map, DS1347_CENTURY_REG, century); ++ err = regmap_write(map, DS1347_CENTURY_REG, bin2bcd(century)); + if (err) + return err; + +diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig +index 024e420f1bb77..ae504c43d9e74 100644 +--- a/drivers/soc/qcom/Kconfig ++++ b/drivers/soc/qcom/Kconfig +@@ -63,6 +63,7 @@ config QCOM_GSBI + config QCOM_LLCC + tristate "Qualcomm Technologies, Inc. LLCC driver" + depends on ARCH_QCOM || COMPILE_TEST ++ select REGMAP_MMIO + help + Qualcomm Technologies, Inc. 
platform specific + Last Level Cache Controller(LLCC) driver for platforms such as, +@@ -236,6 +237,7 @@ config QCOM_ICC_BWMON + tristate "QCOM Interconnect Bandwidth Monitor driver" + depends on ARCH_QCOM || COMPILE_TEST + select PM_OPP ++ select REGMAP_MMIO + help + Sets up driver monitoring bandwidth on various interconnects and + based on that voting for interconnect bandwidth, adjusting their +diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c +index a9472e0e5d61c..27d6e25a01153 100644 +--- a/drivers/soc/ux500/ux500-soc-id.c ++++ b/drivers/soc/ux500/ux500-soc-id.c +@@ -167,20 +167,18 @@ ATTRIBUTE_GROUPS(ux500_soc); + static const char *db8500_read_soc_id(struct device_node *backupram) + { + void __iomem *base; +- void __iomem *uid; + const char *retstr; ++ u32 uid[5]; + + base = of_iomap(backupram, 0); + if (!base) + return NULL; +- uid = base + 0x1fc0; ++ memcpy_fromio(uid, base + 0x1fc0, sizeof(uid)); + + /* Throw these device-specific numbers into the entropy pool */ +- add_device_randomness(uid, 0x14); ++ add_device_randomness(uid, sizeof(uid)); + retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", +- readl((u32 *)uid+0), +- readl((u32 *)uid+1), readl((u32 *)uid+2), +- readl((u32 *)uid+3), readl((u32 *)uid+4)); ++ uid[0], uid[1], uid[2], uid[3], uid[4]); + iounmap(base); + return retstr; + } +diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c +index ce13e746c15f3..e530767e80a5d 100644 +--- a/drivers/staging/media/ipu3/ipu3-v4l2.c ++++ b/drivers/staging/media/ipu3/ipu3-v4l2.c +@@ -188,6 +188,28 @@ static int imgu_subdev_set_fmt(struct v4l2_subdev *sd, + return 0; + } + ++static struct v4l2_rect * ++imgu_subdev_get_crop(struct imgu_v4l2_subdev *sd, ++ struct v4l2_subdev_state *sd_state, unsigned int pad, ++ enum v4l2_subdev_format_whence which) ++{ ++ if (which == V4L2_SUBDEV_FORMAT_TRY) ++ return v4l2_subdev_get_try_crop(&sd->subdev, sd_state, pad); ++ else ++ return &sd->rect.eff; ++} ++ ++static struct v4l2_rect * ++imgu_subdev_get_compose(struct imgu_v4l2_subdev *sd, ++ struct v4l2_subdev_state *sd_state, unsigned int pad, ++ enum v4l2_subdev_format_whence which) ++{ ++ if (which == V4L2_SUBDEV_FORMAT_TRY) ++ return v4l2_subdev_get_try_compose(&sd->subdev, sd_state, pad); ++ else ++ return &sd->rect.bds; ++} ++ + static int imgu_subdev_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_selection *sel) +@@ -200,18 +222,12 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd, + + switch (sel->target) { + case V4L2_SEL_TGT_CROP: +- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) +- sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, +- sel->pad); +- else +- sel->r = imgu_sd->rect.eff; ++ sel->r = *imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad, ++ sel->which); + return 0; + case V4L2_SEL_TGT_COMPOSE: +- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) +- sel->r = *v4l2_subdev_get_try_compose(sd, sd_state, +- sel->pad); +- else +- sel->r = imgu_sd->rect.bds; ++ sel->r = *imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad, ++ sel->which); + return 0; + default: + return -EINVAL; +@@ -223,10 +239,9 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_selection *sel) + { + struct imgu_device *imgu = v4l2_get_subdevdata(sd); +- struct imgu_v4l2_subdev *imgu_sd = container_of(sd, +- struct imgu_v4l2_subdev, +- subdev); +- struct v4l2_rect *rect, *try_sel; ++ struct imgu_v4l2_subdev *imgu_sd = ++ container_of(sd, struct imgu_v4l2_subdev, 
subdev); ++ struct v4l2_rect *rect; + + dev_dbg(&imgu->pci_dev->dev, + "set subdev %u sel which %u target 0x%4x rect [%ux%u]", +@@ -238,22 +253,18 @@ static int imgu_subdev_set_selection(struct v4l2_subdev *sd, + + switch (sel->target) { + case V4L2_SEL_TGT_CROP: +- try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad); +- rect = &imgu_sd->rect.eff; ++ rect = imgu_subdev_get_crop(imgu_sd, sd_state, sel->pad, ++ sel->which); + break; + case V4L2_SEL_TGT_COMPOSE: +- try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad); +- rect = &imgu_sd->rect.bds; ++ rect = imgu_subdev_get_compose(imgu_sd, sd_state, sel->pad, ++ sel->which); + break; + default: + return -EINVAL; + } + +- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) +- *try_sel = sel->r; +- else +- *rect = sel->r; +- ++ *rect = sel->r; + return 0; + } + +diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c +index b26e44adb2be7..426e653bd55d5 100644 +--- a/drivers/staging/media/tegra-video/csi.c ++++ b/drivers/staging/media/tegra-video/csi.c +@@ -433,7 +433,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi, + for (i = 0; i < chan->numgangports; i++) + chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK; + +- chan->of_node = node; ++ chan->of_node = of_node_get(node); + chan->numpads = num_pads; + if (num_pads & 0x2) { + chan->pads[0].flags = MEDIA_PAD_FL_SINK; +@@ -448,6 +448,7 @@ static int tegra_csi_channel_alloc(struct tegra_csi *csi, + chan->mipi = tegra_mipi_request(csi->dev, node); + if (IS_ERR(chan->mipi)) { + ret = PTR_ERR(chan->mipi); ++ chan->mipi = NULL; + dev_err(csi->dev, "failed to get mipi device: %d\n", ret); + } + +@@ -640,6 +641,7 @@ static void tegra_csi_channels_cleanup(struct tegra_csi *csi) + media_entity_cleanup(&subdev->entity); + } + ++ of_node_put(chan->of_node); + list_del(&chan->list); + kfree(chan); + } +diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h +index 4ee05a1785cfa..6960ea2e3d360 100644 +--- a/drivers/staging/media/tegra-video/csi.h ++++ b/drivers/staging/media/tegra-video/csi.h +@@ -56,7 +56,7 @@ struct tegra_csi; + * @framerate: active framerate for TPG + * @h_blank: horizontal blanking for TPG active format + * @v_blank: vertical blanking for TPG active format +- * @mipi: mipi device for corresponding csi channel pads ++ * @mipi: mipi device for corresponding csi channel pads, or NULL if not applicable (TPG, error) + * @pixel_rate: active pixel rate from the sensor on this channel + */ + struct tegra_csi_channel { +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c +index 18374a6d05bdf..18cf801ab5908 100644 +--- a/fs/btrfs/backref.c ++++ b/fs/btrfs/backref.c +@@ -433,6 +433,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, + u64 wanted_disk_byte = ref->wanted_disk_byte; + u64 count = 0; + u64 data_offset; ++ u8 type; + + if (level != 0) { + eb = path->nodes[level]; +@@ -487,6 +488,9 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, + continue; + } + fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); ++ type = btrfs_file_extent_type(eb, fi); ++ if (type == BTRFS_FILE_EXTENT_INLINE) ++ goto next; + disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); + data_offset = btrfs_file_extent_offset(eb, fi); + +diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c +index 3676580c2d97e..7b93719a486c5 100644 +--- a/fs/btrfs/extent-io-tree.c ++++ b/fs/btrfs/extent-io-tree.c +@@ -397,7 +397,7 @@ static int 
insert_state(struct extent_io_tree *tree, + u32 bits, struct extent_changeset *changeset) + { + struct rb_node **node; +- struct rb_node *parent; ++ struct rb_node *parent = NULL; + const u64 end = state->end; + + set_state_bits(tree, state, bits, changeset); +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c +index 635f45f1a2ef8..dba087ad40ea2 100644 +--- a/fs/btrfs/volumes.c ++++ b/fs/btrfs/volumes.c +@@ -7241,8 +7241,9 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, + map->stripes[i].dev = handle_missing_device(fs_info, + devid, uuid); + if (IS_ERR(map->stripes[i].dev)) { ++ ret = PTR_ERR(map->stripes[i].dev); + free_extent_map(em); +- return PTR_ERR(map->stripes[i].dev); ++ return ret; + } + } + +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 712a431614480..6094cb2ff099b 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -678,9 +678,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root) + seq_printf(s, ",echo_interval=%lu", + tcon->ses->server->echo_interval / HZ); + +- /* Only display max_credits if it was overridden on mount */ ++ /* Only display the following if overridden on mount */ + if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE) + seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits); ++ if (tcon->ses->server->tcp_nodelay) ++ seq_puts(s, ",tcpnodelay"); ++ if (tcon->ses->server->noautotune) ++ seq_puts(s, ",noautotune"); ++ if (tcon->ses->server->noblocksnd) ++ seq_puts(s, ",noblocksend"); + + if (tcon->snapshot_time) + seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 9db9527c61cfc..7e7f712f97fd8 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -279,8 +279,10 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, + tcon->need_reconnect = true; + tcon->status = TID_NEED_RECON; + } +- if (ses->tcon_ipc) ++ if (ses->tcon_ipc) { + ses->tcon_ipc->need_reconnect = true; ++ ses->tcon_ipc->status = TID_NEED_RECON; ++ } + + next_session: + spin_unlock(&ses->chan_lock); +@@ -1871,6 +1873,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) + + cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid); + ++ spin_lock(&tcon->tc_lock); ++ tcon->status = TID_GOOD; ++ spin_unlock(&tcon->tc_lock); + ses->tcon_ipc = tcon; + out: + return rc; +@@ -2157,7 +2162,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), + struct cifs_ses * + cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) + { +- int rc = -ENOMEM; ++ int rc = 0; + unsigned int xid; + struct cifs_ses *ses; + struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; +@@ -2206,6 +2211,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) + return ses; + } + ++ rc = -ENOMEM; ++ + cifs_dbg(FYI, "Existing smb sess not found\n"); + ses = sesInfoAlloc(); + if (ses == NULL) +@@ -2278,10 +2285,10 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) + list_add(&ses->smb_ses_list, &server->smb_ses_list); + spin_unlock(&cifs_tcp_ses_lock); + +- free_xid(xid); +- + cifs_setup_ipc(ses, ctx); + ++ free_xid(xid); ++ + return ses; + + get_ses_fail: +@@ -2600,6 +2607,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) + tcon->nodelete = ctx->nodelete; + tcon->local_lease = ctx->local_lease; + INIT_LIST_HEAD(&tcon->pending_opens); ++ tcon->status = TID_GOOD; + + /* schedule query interfaces poll */ + 
INIT_DELAYED_WORK(&tcon->query_interfaces, +diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c +index 59f64c596233b..871d4e9f49fb6 100644 +--- a/fs/dlm/lowcomms.c ++++ b/fs/dlm/lowcomms.c +@@ -1543,7 +1543,11 @@ static void process_recv_sockets(struct work_struct *work) + + static void process_listen_recv_socket(struct work_struct *work) + { +- accept_from_sock(&listen_con); ++ int ret; ++ ++ do { ++ ret = accept_from_sock(&listen_con); ++ } while (!ret); + } + + static void dlm_connect(struct connection *con) +@@ -1820,7 +1824,7 @@ static int dlm_listen_for_all(void) + result = sock->ops->listen(sock, 5); + if (result < 0) { + dlm_close_sock(&listen_con.sock); +- goto out; ++ return result; + } + + return 0; +@@ -2023,7 +2027,6 @@ fail_listen: + dlm_proto_ops = NULL; + fail_proto_ops: + dlm_allow_conn = 0; +- dlm_close_sock(&listen_con.sock); + work_stop(); + fail_local: + deinit_local(); +diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c +index 8f597753ac129..5202eddfc3c0a 100644 +--- a/fs/ext2/dir.c ++++ b/fs/ext2/dir.c +@@ -679,7 +679,7 @@ int ext2_empty_dir (struct inode * inode) + page = ext2_get_page(inode, i, 0, &page_addr); + + if (IS_ERR(page)) +- goto not_empty; ++ return 0; + + kaddr = page_addr; + de = (ext2_dirent *)kaddr; +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 8d5453852f98e..4e739902dc03a 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -558,7 +558,7 @@ enum { + * + * It's not paranoia if the Murphy's Law really *is* out to get you. :-) + */ +-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG)) ++#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG)) + #define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG)) + + static inline void ext4_check_flag_values(void) +@@ -2964,7 +2964,8 @@ int do_journal_get_write_access(handle_t *handle, struct inode *inode, + typedef enum { + EXT4_IGET_NORMAL = 0, + EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */ +- EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */ ++ EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */ ++ EXT4_IGET_BAD = 0x0004 /* Allow to iget a bad inode */ + } ext4_iget_flags; + + extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, +@@ -3619,8 +3620,8 @@ extern void ext4_initialize_dirent_tail(struct buffer_head *bh, + unsigned int blocksize); + extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode, + struct buffer_head *bh); +-extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name, +- struct inode *inode); ++extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name, ++ struct inode *inode, struct dentry *dentry); + extern int __ext4_link(struct inode *dir, struct inode *inode, + struct dentry *dentry); + +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 6c399a8b22b35..36225ef56b0cd 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -5799,6 +5799,14 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu) + struct ext4_extent *extent; + ext4_lblk_t first_lblk, first_lclu, last_lclu; + ++ /* ++ * if data can be stored inline, the logical cluster isn't ++ * mapped - no physical clusters have been allocated, and the ++ * file has no extents ++ */ ++ if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) ++ return 0; ++ + /* search for the extent closest to the first block in the cluster */ + path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0); + if (IS_ERR(path)) { +diff --git a/fs/ext4/extents_status.c 
b/fs/ext4/extents_status.c +index cd0a861853e3f..7ada374ff27d7 100644 +--- a/fs/ext4/extents_status.c ++++ b/fs/ext4/extents_status.c +@@ -1371,7 +1371,7 @@ retry: + if (count_reserved) + count_rsvd(inode, lblk, orig_es.es_len - len1 - len2, + &orig_es, &rc); +- goto out; ++ goto out_get_reserved; + } + + if (len1 > 0) { +@@ -1413,6 +1413,7 @@ retry: + } + } + ++out_get_reserved: + if (count_reserved) + *reserved = get_rsvd(inode, end, es, &rc); + out: +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c +index 0f6d0a80467d7..7ed71c652f67f 100644 +--- a/fs/ext4/fast_commit.c ++++ b/fs/ext4/fast_commit.c +@@ -420,25 +420,34 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update) + struct __track_dentry_update_args *dentry_update = + (struct __track_dentry_update_args *)arg; + struct dentry *dentry = dentry_update->dentry; +- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ++ struct inode *dir = dentry->d_parent->d_inode; ++ struct super_block *sb = inode->i_sb; ++ struct ext4_sb_info *sbi = EXT4_SB(sb); + + mutex_unlock(&ei->i_fc_lock); ++ ++ if (IS_ENCRYPTED(dir)) { ++ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME, ++ NULL); ++ mutex_lock(&ei->i_fc_lock); ++ return -EOPNOTSUPP; ++ } ++ + node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS); + if (!node) { +- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL); ++ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL); + mutex_lock(&ei->i_fc_lock); + return -ENOMEM; + } + + node->fcd_op = dentry_update->op; +- node->fcd_parent = dentry->d_parent->d_inode->i_ino; ++ node->fcd_parent = dir->i_ino; + node->fcd_ino = inode->i_ino; + if (dentry->d_name.len > DNAME_INLINE_LEN) { + node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS); + if (!node->fcd_name.name) { + kmem_cache_free(ext4_fc_dentry_cachep, node); +- ext4_fc_mark_ineligible(inode->i_sb, +- EXT4_FC_REASON_NOMEM, NULL); ++ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL); + mutex_lock(&ei->i_fc_lock); + return -ENOMEM; + } +@@ -666,6 +675,15 @@ static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail) + + /* Ext4 commit path routines */ + ++/* memcpy to fc reserved space and update CRC */ ++static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src, ++ int len, u32 *crc) ++{ ++ if (crc) ++ *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len); ++ return memcpy(dst, src, len); ++} ++ + /* memzero and update CRC */ + static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len, + u32 *crc) +@@ -691,62 +709,59 @@ static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len, + */ + static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc) + { +- struct ext4_fc_tl *tl; ++ struct ext4_fc_tl tl; + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct buffer_head *bh; + int bsize = sbi->s_journal->j_blocksize; + int ret, off = sbi->s_fc_bytes % bsize; +- int pad_len; ++ int remaining; ++ u8 *dst; + + /* +- * After allocating len, we should have space at least for a 0 byte +- * padding. ++ * If 'len' is too long to fit in any block alongside a PAD tlv, then we ++ * cannot fulfill the request. + */ +- if (len + EXT4_FC_TAG_BASE_LEN > bsize) ++ if (len > bsize - EXT4_FC_TAG_BASE_LEN) + return NULL; + +- if (bsize - off - 1 > len + EXT4_FC_TAG_BASE_LEN) { +- /* +- * Only allocate from current buffer if we have enough space for +- * this request AND we have space to add a zero byte padding. 
+- */ +- if (!sbi->s_fc_bh) { +- ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); +- if (ret) +- return NULL; +- sbi->s_fc_bh = bh; +- } ++ if (!sbi->s_fc_bh) { ++ ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); ++ if (ret) ++ return NULL; ++ sbi->s_fc_bh = bh; ++ } ++ dst = sbi->s_fc_bh->b_data + off; ++ ++ /* ++ * Allocate the bytes in the current block if we can do so while still ++ * leaving enough space for a PAD tlv. ++ */ ++ remaining = bsize - EXT4_FC_TAG_BASE_LEN - off; ++ if (len <= remaining) { + sbi->s_fc_bytes += len; +- return sbi->s_fc_bh->b_data + off; ++ return dst; + } +- /* Need to add PAD tag */ +- tl = (struct ext4_fc_tl *)(sbi->s_fc_bh->b_data + off); +- tl->fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD); +- pad_len = bsize - off - 1 - EXT4_FC_TAG_BASE_LEN; +- tl->fc_len = cpu_to_le16(pad_len); +- if (crc) +- *crc = ext4_chksum(sbi, *crc, tl, EXT4_FC_TAG_BASE_LEN); +- if (pad_len > 0) +- ext4_fc_memzero(sb, tl + 1, pad_len, crc); ++ ++ /* ++ * Else, terminate the current block with a PAD tlv, then allocate a new ++ * block and allocate the bytes at the start of that new block. ++ */ ++ ++ tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD); ++ tl.fc_len = cpu_to_le16(remaining); ++ ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc); ++ ext4_fc_memzero(sb, dst + EXT4_FC_TAG_BASE_LEN, remaining, crc); ++ + ext4_fc_submit_bh(sb, false); + + ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh); + if (ret) + return NULL; + sbi->s_fc_bh = bh; +- sbi->s_fc_bytes = (sbi->s_fc_bytes / bsize + 1) * bsize + len; ++ sbi->s_fc_bytes += bsize - off + len; + return sbi->s_fc_bh->b_data; + } + +-/* memcpy to fc reserved space and update CRC */ +-static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src, +- int len, u32 *crc) +-{ +- if (crc) +- *crc = ext4_chksum(EXT4_SB(sb), *crc, src, len); +- return memcpy(dst, src, len); +-} +- + /* + * Complete a fast commit by writing tail tag. + * +@@ -774,7 +789,7 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc) + off = sbi->s_fc_bytes % bsize; + + tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL); +- tl.fc_len = cpu_to_le16(bsize - off - 1 + sizeof(struct ext4_fc_tail)); ++ tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail)); + sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize); + + ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, &crc); +@@ -784,6 +799,8 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc) + dst += sizeof(tail.fc_tid); + tail.fc_crc = cpu_to_le32(crc); + ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL); ++ dst += sizeof(tail.fc_crc); ++ memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. */ + + ext4_fc_submit_bh(sb, true); + +@@ -1388,7 +1405,7 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl, + return 0; + } + +- ret = __ext4_unlink(NULL, old_parent, &entry, inode); ++ ret = __ext4_unlink(old_parent, &entry, inode, NULL); + /* -ENOENT ok coz it might not exist anymore. */ + if (ret == -ENOENT) + ret = 0; +@@ -1977,32 +1994,31 @@ void ext4_fc_replay_cleanup(struct super_block *sb) + kfree(sbi->s_fc_replay_state.fc_modified_inodes); + } + +-static inline bool ext4_fc_tag_len_isvalid(struct ext4_fc_tl *tl, +- u8 *val, u8 *end) ++static bool ext4_fc_value_len_isvalid(struct ext4_sb_info *sbi, ++ int tag, int len) + { +- if (val + tl->fc_len > end) +- return false; +- +- /* Here only check ADD_RANGE/TAIL/HEAD which will read data when do +- * journal rescan before do CRC check. 
Other tags length check will +- * rely on CRC check. +- */ +- switch (tl->fc_tag) { ++ switch (tag) { + case EXT4_FC_TAG_ADD_RANGE: +- return (sizeof(struct ext4_fc_add_range) == tl->fc_len); +- case EXT4_FC_TAG_TAIL: +- return (sizeof(struct ext4_fc_tail) <= tl->fc_len); +- case EXT4_FC_TAG_HEAD: +- return (sizeof(struct ext4_fc_head) == tl->fc_len); ++ return len == sizeof(struct ext4_fc_add_range); + case EXT4_FC_TAG_DEL_RANGE: ++ return len == sizeof(struct ext4_fc_del_range); ++ case EXT4_FC_TAG_CREAT: + case EXT4_FC_TAG_LINK: + case EXT4_FC_TAG_UNLINK: +- case EXT4_FC_TAG_CREAT: ++ len -= sizeof(struct ext4_fc_dentry_info); ++ return len >= 1 && len <= EXT4_NAME_LEN; + case EXT4_FC_TAG_INODE: ++ len -= sizeof(struct ext4_fc_inode); ++ return len >= EXT4_GOOD_OLD_INODE_SIZE && ++ len <= sbi->s_inode_size; + case EXT4_FC_TAG_PAD: +- default: +- return true; ++ return true; /* padding can have any length */ ++ case EXT4_FC_TAG_TAIL: ++ return len >= sizeof(struct ext4_fc_tail); ++ case EXT4_FC_TAG_HEAD: ++ return len == sizeof(struct ext4_fc_head); + } ++ return false; + } + + /* +@@ -2040,7 +2056,7 @@ static int ext4_fc_replay_scan(journal_t *journal, + state = &sbi->s_fc_replay_state; + + start = (u8 *)bh->b_data; +- end = (__u8 *)bh->b_data + journal->j_blocksize - 1; ++ end = start + journal->j_blocksize; + + if (state->fc_replay_expected_off == 0) { + state->fc_cur_tag = 0; +@@ -2061,11 +2077,12 @@ static int ext4_fc_replay_scan(journal_t *journal, + } + + state->fc_replay_expected_off++; +- for (cur = start; cur < end - EXT4_FC_TAG_BASE_LEN; ++ for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN; + cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) { + ext4_fc_get_tl(&tl, cur); + val = cur + EXT4_FC_TAG_BASE_LEN; +- if (!ext4_fc_tag_len_isvalid(&tl, val, end)) { ++ if (tl.fc_len > end - val || ++ !ext4_fc_value_len_isvalid(sbi, tl.fc_tag, tl.fc_len)) { + ret = state->fc_replay_num_tags ? 
+ JBD2_FC_REPLAY_STOP : -ECANCELED; + goto out_err; +@@ -2178,9 +2195,9 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, + #endif + + start = (u8 *)bh->b_data; +- end = (__u8 *)bh->b_data + journal->j_blocksize - 1; ++ end = start + journal->j_blocksize; + +- for (cur = start; cur < end - EXT4_FC_TAG_BASE_LEN; ++ for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN; + cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) { + ext4_fc_get_tl(&tl, cur); + val = cur + EXT4_FC_TAG_BASE_LEN; +@@ -2249,17 +2266,17 @@ void ext4_fc_init(struct super_block *sb, journal_t *journal) + journal->j_fc_cleanup_callback = ext4_fc_cleanup; + } + +-static const char *fc_ineligible_reasons[] = { +- "Extended attributes changed", +- "Cross rename", +- "Journal flag changed", +- "Insufficient memory", +- "Swap boot", +- "Resize", +- "Dir renamed", +- "Falloc range op", +- "Data journalling", +- "FC Commit Failed" ++static const char * const fc_ineligible_reasons[] = { ++ [EXT4_FC_REASON_XATTR] = "Extended attributes changed", ++ [EXT4_FC_REASON_CROSS_RENAME] = "Cross rename", ++ [EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed", ++ [EXT4_FC_REASON_NOMEM] = "Insufficient memory", ++ [EXT4_FC_REASON_SWAP_BOOT] = "Swap boot", ++ [EXT4_FC_REASON_RESIZE] = "Resize", ++ [EXT4_FC_REASON_RENAME_DIR] = "Dir renamed", ++ [EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op", ++ [EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling", ++ [EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename", + }; + + int ext4_fc_info_show(struct seq_file *seq, void *v) +diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h +index a6154c3ed1357..2fadb2c4780c8 100644 +--- a/fs/ext4/fast_commit.h ++++ b/fs/ext4/fast_commit.h +@@ -58,7 +58,7 @@ struct ext4_fc_dentry_info { + __u8 fc_dname[]; + }; + +-/* Value structure for EXT4_FC_TAG_INODE and EXT4_FC_TAG_INODE_PARTIAL. */ ++/* Value structure for EXT4_FC_TAG_INODE. */ + struct ext4_fc_inode { + __le32 fc_ino; + __u8 fc_raw_inode[]; +@@ -96,6 +96,7 @@ enum { + EXT4_FC_REASON_RENAME_DIR, + EXT4_FC_REASON_FALLOC_RANGE, + EXT4_FC_REASON_INODE_JOURNAL_DATA, ++ EXT4_FC_REASON_ENCRYPTED_FILENAME, + EXT4_FC_REASON_MAX + }; + +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c +index 860fc51190098..c68bebe7ff4b6 100644 +--- a/fs/ext4/indirect.c ++++ b/fs/ext4/indirect.c +@@ -148,6 +148,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, + struct super_block *sb = inode->i_sb; + Indirect *p = chain; + struct buffer_head *bh; ++ unsigned int key; + int ret = -EIO; + + *err = 0; +@@ -156,7 +157,13 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth, + if (!p->key) + goto no_block; + while (--depth) { +- bh = sb_getblk(sb, le32_to_cpu(p->key)); ++ key = le32_to_cpu(p->key); ++ if (key > ext4_blocks_count(EXT4_SB(sb)->s_es)) { ++ /* the block was out of range */ ++ ret = -EFSCORRUPTED; ++ goto failure; ++ } ++ bh = sb_getblk(sb, key); + if (unlikely(!bh)) { + ret = -ENOMEM; + goto failure; +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 2b5ef1b642499..283afda26d9cb 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -222,13 +222,13 @@ void ext4_evict_inode(struct inode *inode) + + /* + * For inodes with journalled data, transaction commit could have +- * dirtied the inode. Flush worker is ignoring it because of I_FREEING +- * flag but we still need to remove the inode from the writeback lists. ++ * dirtied the inode. 
And for inodes with dioread_nolock, unwritten ++ * extents converting worker could merge extents and also have dirtied ++ * the inode. Flush worker is ignoring it because of I_FREEING flag but ++ * we still need to remove the inode from the writeback lists. + */ +- if (!list_empty_careful(&inode->i_io_list)) { +- WARN_ON_ONCE(!ext4_should_journal_data(inode)); ++ if (!list_empty_careful(&inode->i_io_list)) + inode_io_list_del(inode); +- } + + /* + * Protect us against freezing - iput() caller didn't have to have any +@@ -335,6 +335,12 @@ stop_handle: + ext4_xattr_inode_array_free(ea_inode_array); + return; + no_delete: ++ /* ++ * Check out some where else accidentally dirty the evicting inode, ++ * which may probably cause inode use-after-free issues later. ++ */ ++ WARN_ON_ONCE(!list_empty_careful(&inode->i_io_list)); ++ + if (!list_empty(&EXT4_I(inode)->i_fc_list)) + ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL); + ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ +@@ -1309,7 +1315,8 @@ static int ext4_write_end(struct file *file, + + trace_ext4_write_end(inode, pos, len, copied); + +- if (ext4_has_inline_data(inode)) ++ if (ext4_has_inline_data(inode) && ++ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) + return ext4_write_inline_data_end(inode, pos, len, copied, page); + + copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); +@@ -4225,7 +4232,8 @@ int ext4_truncate(struct inode *inode) + + /* If we zero-out tail of the page, we have to create jinode for jbd2 */ + if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { +- if (ext4_inode_attach_jinode(inode) < 0) ++ err = ext4_inode_attach_jinode(inode); ++ if (err) + goto out_trace; + } + +@@ -4473,9 +4481,17 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, + inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; + inode_offset = ((ino - 1) % + EXT4_INODES_PER_GROUP(sb)); +- block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); + iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); + ++ block = ext4_inode_table(sb, gdp); ++ if ((block <= le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) || ++ (block >= ext4_blocks_count(EXT4_SB(sb)->s_es))) { ++ ext4_error(sb, "Invalid inode table block %llu in " ++ "block_group %u", block, iloc->block_group); ++ return -EFSCORRUPTED; ++ } ++ block += (inode_offset / inodes_per_block); ++ + bh = sb_getblk(sb, block); + if (unlikely(!bh)) + return -ENOMEM; +@@ -5044,8 +5060,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, + if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) + ext4_error_inode(inode, function, line, 0, + "casefold flag without casefold feature"); +- brelse(iloc.bh); ++ if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { ++ ext4_error_inode(inode, function, line, 0, ++ "bad inode without EXT4_IGET_BAD flag"); ++ ret = -EUCLEAN; ++ goto bad_inode; ++ } + ++ brelse(iloc.bh); + unlock_new_inode(inode); + return inode; + +@@ -5853,6 +5875,14 @@ static int __ext4_expand_extra_isize(struct inode *inode, + return 0; + } + ++ /* ++ * We may need to allocate external xattr block so we need quotas ++ * initialized. Here we can be called with various locks held so we ++ * cannot affort to initialize quotas ourselves. So just bail. 
++ */ ++ if (dquot_initialize_needed(inode)) ++ return -EAGAIN; ++ + /* try to expand with EAs present */ + error = ext4_expand_extra_isize_ea(inode, new_extra_isize, + raw_inode, handle); +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c +index 95dfea28bf4e9..8067ccda34e45 100644 +--- a/fs/ext4/ioctl.c ++++ b/fs/ext4/ioctl.c +@@ -374,7 +374,8 @@ static long swap_inode_boot_loader(struct super_block *sb, + blkcnt_t blocks; + unsigned short bytes; + +- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL); ++ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, ++ EXT4_IGET_SPECIAL | EXT4_IGET_BAD); + if (IS_ERR(inode_bl)) + return PTR_ERR(inode_bl); + ei_bl = EXT4_I(inode_bl); +@@ -424,7 +425,7 @@ static long swap_inode_boot_loader(struct super_block *sb, + /* Protect extent tree against block allocations via delalloc */ + ext4_double_down_write_data_sem(inode, inode_bl); + +- if (inode_bl->i_nlink == 0) { ++ if (is_bad_inode(inode_bl) || !S_ISREG(inode_bl->i_mode)) { + /* this inode has never been used as a BOOT_LOADER */ + set_nlink(inode_bl, 1); + i_uid_write(inode_bl, 0); +@@ -731,6 +732,10 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid) + if (ext4_is_quota_file(inode)) + return err; + ++ err = dquot_initialize(inode); ++ if (err) ++ return err; ++ + err = ext4_get_inode_loc(inode, &iloc); + if (err) + return err; +@@ -746,10 +751,6 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid) + brelse(iloc.bh); + } + +- err = dquot_initialize(inode); +- if (err) +- return err; +- + handle = ext4_journal_start(inode, EXT4_HT_QUOTA, + EXT4_QUOTA_INIT_BLOCKS(sb) + + EXT4_QUOTA_DEL_BLOCKS(sb) + 3); +@@ -1153,19 +1154,22 @@ static int ext4_ioctl_getuuid(struct ext4_sb_info *sbi, + + if (fsuuid.fsu_len == 0) { + fsuuid.fsu_len = UUID_SIZE; +- if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid.fsu_len))) ++ if (copy_to_user(&ufsuuid->fsu_len, &fsuuid.fsu_len, ++ sizeof(fsuuid.fsu_len))) + return -EFAULT; +- return -EINVAL; ++ return 0; + } + +- if (fsuuid.fsu_len != UUID_SIZE || fsuuid.fsu_flags != 0) ++ if (fsuuid.fsu_len < UUID_SIZE || fsuuid.fsu_flags != 0) + return -EINVAL; + + lock_buffer(sbi->s_sbh); + memcpy(uuid, sbi->s_es->s_uuid, UUID_SIZE); + unlock_buffer(sbi->s_sbh); + +- if (copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE)) ++ fsuuid.fsu_len = UUID_SIZE; ++ if (copy_to_user(ufsuuid, &fsuuid, sizeof(fsuuid)) || ++ copy_to_user(&ufsuuid->fsu_uuid[0], uuid, UUID_SIZE)) + return -EFAULT; + return 0; + } +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index c08c0aba18836..1c5518a4bdf91 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -3204,14 +3204,20 @@ end_rmdir: + return retval; + } + +-int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name, +- struct inode *inode) ++int __ext4_unlink(struct inode *dir, const struct qstr *d_name, ++ struct inode *inode, ++ struct dentry *dentry /* NULL during fast_commit recovery */) + { + int retval = -ENOENT; + struct buffer_head *bh; + struct ext4_dir_entry_2 *de; ++ handle_t *handle; + int skip_remove_dentry = 0; + ++ /* ++ * Keep this outside the transaction; it may have to set up the ++ * directory's encryption key, which isn't GFP_NOFS-safe. 
++ */ + bh = ext4_find_entry(dir, d_name, &de, NULL); + if (IS_ERR(bh)) + return PTR_ERR(bh); +@@ -3228,7 +3234,14 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name + if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) + skip_remove_dentry = 1; + else +- goto out; ++ goto out_bh; ++ } ++ ++ handle = ext4_journal_start(dir, EXT4_HT_DIR, ++ EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); ++ if (IS_ERR(handle)) { ++ retval = PTR_ERR(handle); ++ goto out_bh; + } + + if (IS_DIRSYNC(dir)) +@@ -3237,12 +3250,12 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name + if (!skip_remove_dentry) { + retval = ext4_delete_entry(handle, dir, de, bh); + if (retval) +- goto out; ++ goto out_handle; + dir->i_ctime = dir->i_mtime = current_time(dir); + ext4_update_dx_flag(dir); + retval = ext4_mark_inode_dirty(handle, dir); + if (retval) +- goto out; ++ goto out_handle; + } else { + retval = 0; + } +@@ -3255,15 +3268,17 @@ int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name + ext4_orphan_add(handle, inode); + inode->i_ctime = current_time(inode); + retval = ext4_mark_inode_dirty(handle, inode); +- +-out: ++ if (dentry && !retval) ++ ext4_fc_track_unlink(handle, dentry); ++out_handle: ++ ext4_journal_stop(handle); ++out_bh: + brelse(bh); + return retval; + } + + static int ext4_unlink(struct inode *dir, struct dentry *dentry) + { +- handle_t *handle; + int retval; + + if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb)))) +@@ -3281,16 +3296,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) + if (retval) + goto out_trace; + +- handle = ext4_journal_start(dir, EXT4_HT_DIR, +- EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); +- if (IS_ERR(handle)) { +- retval = PTR_ERR(handle); +- goto out_trace; +- } +- +- retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry)); +- if (!retval) +- ext4_fc_track_unlink(handle, dentry); ++ retval = __ext4_unlink(dir, &dentry->d_name, d_inode(dentry), dentry); + #if IS_ENABLED(CONFIG_UNICODE) + /* VFS negative dentries are incompatible with Encoding and + * Case-insensitiveness. 
Eventually we'll want avoid +@@ -3301,8 +3307,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) + if (IS_CASEFOLDED(dir)) + d_invalidate(dentry); + #endif +- if (handle) +- ext4_journal_stop(handle); + + out_trace: + trace_ext4_unlink_exit(dentry, retval); +@@ -3792,6 +3796,9 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir, + return -EXDEV; + + retval = dquot_initialize(old.dir); ++ if (retval) ++ return retval; ++ retval = dquot_initialize(old.inode); + if (retval) + return retval; + retval = dquot_initialize(new.dir); +diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c +index 69a9cf9137a61..e5b47dda33175 100644 +--- a/fs/ext4/orphan.c ++++ b/fs/ext4/orphan.c +@@ -412,7 +412,7 @@ void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) + /* don't clear list on RO mount w/ errors */ + if (es->s_last_orphan && !(s_flags & SB_RDONLY)) { + ext4_msg(sb, KERN_INFO, "Errors on filesystem, " +- "clearing orphan list.\n"); ++ "clearing orphan list."); + es->s_last_orphan = 0; + } + ext4_debug("Skipping orphan recovery on fs with errors.\n"); +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index 46b87ffeb3045..b493233750ab2 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -1110,6 +1110,16 @@ exit_free: + return err; + } + ++static inline void ext4_set_block_group_nr(struct super_block *sb, char *data, ++ ext4_group_t group) ++{ ++ struct ext4_super_block *es = (struct ext4_super_block *) data; ++ ++ es->s_block_group_nr = cpu_to_le16(group); ++ if (ext4_has_metadata_csum(sb)) ++ es->s_checksum = ext4_superblock_csum(sb, es); ++} ++ + /* + * Update the backup copies of the ext4 metadata. These don't need to be part + * of the main resize transaction, because e2fsck will re-write them if there +@@ -1158,7 +1168,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data, + while (group < sbi->s_groups_count) { + struct buffer_head *bh; + ext4_fsblk_t backup_block; +- struct ext4_super_block *es; ++ int has_super = ext4_bg_has_super(sb, group); ++ ext4_fsblk_t first_block = ext4_group_first_block_no(sb, group); + + /* Out of journal space, and can't get more - abort - so sad */ + err = ext4_resize_ensure_credits_batch(handle, 1); +@@ -1168,8 +1179,7 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data, + if (meta_bg == 0) + backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; + else +- backup_block = (ext4_group_first_block_no(sb, group) + +- ext4_bg_has_super(sb, group)); ++ backup_block = first_block + has_super; + + bh = sb_getblk(sb, backup_block); + if (unlikely(!bh)) { +@@ -1187,10 +1197,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data, + memcpy(bh->b_data, data, size); + if (rest) + memset(bh->b_data + size, 0, rest); +- es = (struct ext4_super_block *) bh->b_data; +- es->s_block_group_nr = cpu_to_le16(group); +- if (ext4_has_metadata_csum(sb)) +- es->s_checksum = ext4_superblock_csum(sb, es); ++ if (has_super && (backup_block == first_block)) ++ ext4_set_block_group_nr(sb, bh->b_data, group); + set_buffer_uptodate(bh); + unlock_buffer(bh); + err = ext4_handle_dirty_metadata(handle, NULL, bh); +@@ -1476,8 +1484,6 @@ static void ext4_update_super(struct super_block *sb, + * active. 
*/ + ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + + reserved_blocks); +- ext4_superblock_csum_set(sb); +- unlock_buffer(sbi->s_sbh); + + /* Update the free space counts */ + percpu_counter_add(&sbi->s_freeclusters_counter, +@@ -1513,6 +1519,8 @@ static void ext4_update_super(struct super_block *sb, + ext4_calculate_overhead(sb); + es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead); + ++ ext4_superblock_csum_set(sb); ++ unlock_buffer(sbi->s_sbh); + if (test_opt(sb, DEBUG)) + printk(KERN_DEBUG "EXT4-fs: added group %u:" + "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, +@@ -1596,8 +1604,8 @@ exit_journal: + int meta_bg = ext4_has_feature_meta_bg(sb); + sector_t old_gdb = 0; + +- update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, +- sizeof(struct ext4_super_block), 0); ++ update_backups(sb, ext4_group_first_block_no(sb, 0), ++ (char *)es, sizeof(struct ext4_super_block), 0); + for (; gdb_num <= gdb_num_end; gdb_num++) { + struct buffer_head *gdb_bh; + +@@ -1808,7 +1816,7 @@ errout: + if (test_opt(sb, DEBUG)) + printk(KERN_DEBUG "EXT4-fs: extended group to %llu " + "blocks\n", ext4_blocks_count(es)); +- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, ++ update_backups(sb, ext4_group_first_block_no(sb, 0), + (char *)es, sizeof(struct ext4_super_block), 0); + } + return err; +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 7cdd2138c8972..aa4f65663fad8 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -1323,6 +1323,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) + return NULL; + + inode_set_iversion(&ei->vfs_inode, 1); ++ ei->i_flags = 0; + spin_lock_init(&ei->i_raw_lock); + INIT_LIST_HEAD(&ei->i_prealloc_list); + atomic_set(&ei->i_prealloc_active, 0); +@@ -2247,7 +2248,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) + return -EINVAL; + } + +- error = fs_lookup_param(fc, param, 1, &path); ++ error = fs_lookup_param(fc, param, 1, LOOKUP_FOLLOW, &path); + if (error) { + ext4_msg(NULL, KERN_ERR, "error: could not find " + "journal device path"); +@@ -5287,14 +5288,15 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) + goto failed_mount3a; + } else { + /* Nojournal mode, all journal mount options are illegal */ +- if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { ++ if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { + ext4_msg(sb, KERN_ERR, "can't mount with " +- "journal_checksum, fs mounted w/o journal"); ++ "journal_async_commit, fs mounted w/o journal"); + goto failed_mount3a; + } +- if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ++ ++ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { + ext4_msg(sb, KERN_ERR, "can't mount with " +- "journal_async_commit, fs mounted w/o journal"); ++ "journal_checksum, fs mounted w/o journal"); + goto failed_mount3a; + } + if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { +@@ -5723,7 +5725,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb, + + ext4_debug("Journal inode found at %p: %lld bytes\n", + journal_inode, journal_inode->i_size); +- if (!S_ISREG(journal_inode->i_mode)) { ++ if (!S_ISREG(journal_inode->i_mode) || IS_ENCRYPTED(journal_inode)) { + ext4_msg(sb, KERN_ERR, "invalid journal inode"); + iput(journal_inode); + return NULL; +@@ -6886,6 +6888,20 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, + return err; + } + ++static inline bool ext4_check_quota_inum(int type, unsigned long qf_inum) ++{ ++ switch (type) { ++ case USRQUOTA: ++ return qf_inum == EXT4_USR_QUOTA_INO; ++ case 
GRPQUOTA: ++ return qf_inum == EXT4_GRP_QUOTA_INO; ++ case PRJQUOTA: ++ return qf_inum >= EXT4_GOOD_OLD_FIRST_INO; ++ default: ++ BUG(); ++ } ++} ++ + static int ext4_quota_enable(struct super_block *sb, int type, int format_id, + unsigned int flags) + { +@@ -6902,9 +6918,16 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id, + if (!qf_inums[type]) + return -EPERM; + ++ if (!ext4_check_quota_inum(type, qf_inums[type])) { ++ ext4_error(sb, "Bad quota inum: %lu, type: %d", ++ qf_inums[type], type); ++ return -EUCLEAN; ++ } ++ + qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL); + if (IS_ERR(qf_inode)) { +- ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]); ++ ext4_error(sb, "Bad quota inode: %lu, type: %d", ++ qf_inums[type], type); + return PTR_ERR(qf_inode); + } + +@@ -6943,8 +6966,9 @@ int ext4_enable_quotas(struct super_block *sb) + if (err) { + ext4_warning(sb, + "Failed to enable quota tracking " +- "(type=%d, err=%d). Please run " +- "e2fsck to fix.", type, err); ++ "(type=%d, err=%d, ino=%lu). " ++ "Please run e2fsck to fix.", type, ++ err, qf_inums[type]); + for (type--; type >= 0; type--) { + struct inode *inode; + +diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c +index 3c640bd7ecaeb..30e3b65798b50 100644 +--- a/fs/ext4/verity.c ++++ b/fs/ext4/verity.c +@@ -79,7 +79,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count, + size_t n = min_t(size_t, count, + PAGE_SIZE - offset_in_page(pos)); + struct page *page; +- void *fsdata; ++ void *fsdata = NULL; + int res; + + res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata); +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 36d6ba7190b6d..866772a2e068f 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -1281,7 +1281,7 @@ retry_ref: + ce = mb_cache_entry_get(ea_block_cache, hash, + bh->b_blocknr); + if (ce) { +- ce->e_reusable = 1; ++ set_bit(MBE_REUSABLE_B, &ce->e_flags); + mb_cache_entry_put(ea_block_cache, ce); + } + } +@@ -1441,6 +1441,9 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle, + if (!err) + err = ext4_inode_attach_jinode(ea_inode); + if (err) { ++ if (ext4_xattr_inode_dec_ref(handle, ea_inode)) ++ ext4_warning_inode(ea_inode, ++ "cleanup dec ref error %d", err); + iput(ea_inode); + return ERR_PTR(err); + } +@@ -2042,7 +2045,7 @@ inserted: + } + BHDR(new_bh)->h_refcount = cpu_to_le32(ref); + if (ref == EXT4_XATTR_REFCOUNT_MAX) +- ce->e_reusable = 0; ++ clear_bit(MBE_REUSABLE_B, &ce->e_flags); + ea_bdebug(new_bh, "reusing; refcount now=%d", + ref); + ext4_xattr_block_csum_set(inode, new_bh); +@@ -2070,19 +2073,11 @@ inserted: + + goal = ext4_group_first_block_no(sb, + EXT4_I(inode)->i_block_group); +- +- /* non-extent files can't have physical blocks past 2^32 */ +- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) +- goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; +- + block = ext4_new_meta_blocks(handle, inode, goal, 0, + NULL, &error); + if (error) + goto cleanup; + +- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) +- BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); +- + ea_idebug(inode, "creating block %llu", + (unsigned long long)block); + +@@ -2555,7 +2550,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, + + is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); + bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS); +- buffer = kmalloc(value_size, GFP_NOFS); ++ buffer = kvmalloc(value_size, GFP_NOFS); + b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS); + if 
(!is || !bs || !buffer || !b_entry_name) { + error = -ENOMEM; +@@ -2607,7 +2602,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, + error = 0; + out: + kfree(b_entry_name); +- kfree(buffer); ++ kvfree(buffer); + if (is) + brelse(is->iloc.bh); + if (bs) +diff --git a/fs/fs_parser.c b/fs/fs_parser.c +index ed40ce5742fda..edb3712dcfa58 100644 +--- a/fs/fs_parser.c ++++ b/fs/fs_parser.c +@@ -138,15 +138,16 @@ EXPORT_SYMBOL(__fs_parse); + * @fc: The filesystem context to log errors through. + * @param: The parameter. + * @want_bdev: T if want a blockdev ++ * @flags: Pathwalk flags passed to filename_lookup() + * @_path: The result of the lookup + */ + int fs_lookup_param(struct fs_context *fc, + struct fs_parameter *param, + bool want_bdev, ++ unsigned int flags, + struct path *_path) + { + struct filename *f; +- unsigned int flags = 0; + bool put_f; + int ret; + +diff --git a/fs/mbcache.c b/fs/mbcache.c +index e272ad738faff..2a4b8b549e934 100644 +--- a/fs/mbcache.c ++++ b/fs/mbcache.c +@@ -100,8 +100,9 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, + atomic_set(&entry->e_refcnt, 2); + entry->e_key = key; + entry->e_value = value; +- entry->e_reusable = reusable; +- entry->e_referenced = 0; ++ entry->e_flags = 0; ++ if (reusable) ++ set_bit(MBE_REUSABLE_B, &entry->e_flags); + head = mb_cache_entry_head(cache, key); + hlist_bl_lock(head); + hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { +@@ -165,7 +166,8 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache, + while (node) { + entry = hlist_bl_entry(node, struct mb_cache_entry, + e_hash_list); +- if (entry->e_key == key && entry->e_reusable && ++ if (entry->e_key == key && ++ test_bit(MBE_REUSABLE_B, &entry->e_flags) && + atomic_inc_not_zero(&entry->e_refcnt)) + goto out; + node = node->next; +@@ -284,7 +286,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_or_get); + void mb_cache_entry_touch(struct mb_cache *cache, + struct mb_cache_entry *entry) + { +- entry->e_referenced = 1; ++ set_bit(MBE_REFERENCED_B, &entry->e_flags); + } + EXPORT_SYMBOL(mb_cache_entry_touch); + +@@ -309,9 +311,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache, + entry = list_first_entry(&cache->c_list, + struct mb_cache_entry, e_list); + /* Drop initial hash reference if there is no user */ +- if (entry->e_referenced || ++ if (test_bit(MBE_REFERENCED_B, &entry->e_flags) || + atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) { +- entry->e_referenced = 0; ++ clear_bit(MBE_REFERENCED_B, &entry->e_flags); + list_move_tail(&entry->e_list, &cache->c_list); + continue; + } +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c +index 0427b44bfee54..f27faf5db5544 100644 +--- a/fs/quota/dquot.c ++++ b/fs/quota/dquot.c +@@ -2324,6 +2324,8 @@ static int vfs_setup_quota_inode(struct inode *inode, int type) + struct super_block *sb = inode->i_sb; + struct quota_info *dqopt = sb_dqopt(sb); + ++ if (is_bad_inode(inode)) ++ return -EUCLEAN; + if (!S_ISREG(inode->i_mode)) + return -EACCES; + if (IS_RDONLY(inode)) +diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h +index 9e1e6965f4074..0eb8f035b3d9f 100644 +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -642,7 +642,7 @@ static inline u32 type_flag(u32 type) + } + + /* only use after check_attach_btf_id() */ +-static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) ++static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) + { + return prog->type == BPF_PROG_TYPE_EXT ? 
+ prog->aux->dst_prog->type : prog->type; +diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h +index 34aab4dd336c8..4dc7cda4fd469 100644 +--- a/include/linux/devfreq.h ++++ b/include/linux/devfreq.h +@@ -152,8 +152,8 @@ struct devfreq_stats { + * @max_state: count of entry present in the frequency table. + * @previous_freq: previously configured frequency value. + * @last_status: devfreq user device info, performance statistics +- * @data: Private data of the governor. The devfreq framework does not +- * touch this. ++ * @data: devfreq driver pass to governors, governor should not change it. ++ * @governor_data: private data for governors, devfreq core doesn't touch it. + * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs) + * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs) + * @scaling_min_freq: Limit minimum frequency requested by OPP interface +@@ -193,7 +193,8 @@ struct devfreq { + unsigned long previous_freq; + struct devfreq_dev_status last_status; + +- void *data; /* private data for governors */ ++ void *data; ++ void *governor_data; + + struct dev_pm_qos_request user_min_freq_req; + struct dev_pm_qos_request user_max_freq_req; +diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h +index f103c91139d4a..01542c4b87a2b 100644 +--- a/include/linux/fs_parser.h ++++ b/include/linux/fs_parser.h +@@ -76,6 +76,7 @@ static inline int fs_parse(struct fs_context *fc, + extern int fs_lookup_param(struct fs_context *fc, + struct fs_parameter *param, + bool want_bdev, ++ unsigned int flags, + struct path *_path); + + extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found); +diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h +index 2da63fd7b98f4..97e64184767de 100644 +--- a/include/linux/mbcache.h ++++ b/include/linux/mbcache.h +@@ -10,6 +10,12 @@ + + struct mb_cache; + ++/* Cache entry flags */ ++enum { ++ MBE_REFERENCED_B = 0, ++ MBE_REUSABLE_B ++}; ++ + struct mb_cache_entry { + /* List of entries in cache - protected by cache->c_list_lock */ + struct list_head e_list; +@@ -26,8 +32,7 @@ struct mb_cache_entry { + atomic_t e_refcnt; + /* Key in hash - stable during lifetime of the entry */ + u32 e_key; +- u32 e_referenced:1; +- u32 e_reusable:1; ++ unsigned long e_flags; + /* User provided value - stable during lifetime of the entry */ + u64 e_value; + }; +diff --git a/include/linux/prandom.h b/include/linux/prandom.h +index e0a0759dd09c0..1f4a0de7b019e 100644 +--- a/include/linux/prandom.h ++++ b/include/linux/prandom.h +@@ -23,24 +23,10 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + #define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +-/** +- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) +- * @ep_ro: right open interval endpoint +- * +- * Returns a pseudo-random number that is in interval [0, ep_ro). This is +- * useful when requesting a random index of an array containing ep_ro elements, +- * for example. The result is somewhat biased when ep_ro is not a power of 2, +- * so do not use this for cryptographic purposes. +- * +- * Returns: pseudo-random number in interval [0, ep_ro) +- */ ++/* Deprecated: use get_random_u32_below() instead. 
*/ + static inline u32 prandom_u32_max(u32 ep_ro) + { +- if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8) +- return (get_random_u8() * ep_ro) >> 8; +- if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16) +- return (get_random_u16() * ep_ro) >> 16; +- return ((u64)get_random_u32() * ep_ro) >> 32; ++ return get_random_u32_below(ep_ro); + } + + /* +diff --git a/include/linux/random.h b/include/linux/random.h +index 147a5e0d0b8ed..bd954ecbef901 100644 +--- a/include/linux/random.h ++++ b/include/linux/random.h +@@ -51,6 +51,71 @@ static inline unsigned long get_random_long(void) + #endif + } + ++u32 __get_random_u32_below(u32 ceil); ++ ++/* ++ * Returns a random integer in the interval [0, ceil), with uniform ++ * distribution, suitable for all uses. Fastest when ceil is a constant, but ++ * still fast for variable ceil as well. ++ */ ++static inline u32 get_random_u32_below(u32 ceil) ++{ ++ if (!__builtin_constant_p(ceil)) ++ return __get_random_u32_below(ceil); ++ ++ /* ++ * For the fast path, below, all operations on ceil are precomputed by ++ * the compiler, so this incurs no overhead for checking pow2, doing ++ * divisions, or branching based on integer size. The resultant ++ * algorithm does traditional reciprocal multiplication (typically ++ * optimized by the compiler into shifts and adds), rejecting samples ++ * whose lower half would indicate a range indivisible by ceil. ++ */ ++ BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0"); ++ if (ceil <= 1) ++ return 0; ++ for (;;) { ++ if (ceil <= 1U << 8) { ++ u32 mult = ceil * get_random_u8(); ++ if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil)) ++ return mult >> 8; ++ } else if (ceil <= 1U << 16) { ++ u32 mult = ceil * get_random_u16(); ++ if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil)) ++ return mult >> 16; ++ } else { ++ u64 mult = (u64)ceil * get_random_u32(); ++ if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil)) ++ return mult >> 32; ++ } ++ } ++} ++ ++/* ++ * Returns a random integer in the interval (floor, U32_MAX], with uniform ++ * distribution, suitable for all uses. Fastest when floor is a constant, but ++ * still fast for variable floor as well. ++ */ ++static inline u32 get_random_u32_above(u32 floor) ++{ ++ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX, ++ "get_random_u32_above() must take floor < U32_MAX"); ++ return floor + 1 + get_random_u32_below(U32_MAX - floor); ++} ++ ++/* ++ * Returns a random integer in the interval [floor, ceil], with uniform ++ * distribution, suitable for all uses. Fastest when floor and ceil are ++ * constant, but still fast for variable floor and ceil as well. ++ */ ++static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil) ++{ ++ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) && ++ (floor > ceil || ceil - floor == U32_MAX), ++ "get_random_u32_inclusive() must take floor <= ceil"); ++ return floor + get_random_u32_below(ceil - floor + 1); ++} ++ + /* + * On 64-bit architectures, protect against non-terminated C string overflows + * by zeroing out the first byte of the canary; this leaves 56 bits of entropy. 
+diff --git a/include/net/mptcp.h b/include/net/mptcp.h +index 412479ebf5ad3..3c5c68618fcc5 100644 +--- a/include/net/mptcp.h ++++ b/include/net/mptcp.h +@@ -97,8 +97,6 @@ struct mptcp_out_options { + }; + + #ifdef CONFIG_MPTCP +-extern struct request_sock_ops mptcp_subflow_request_sock_ops; +- + void mptcp_init(void); + + static inline bool sk_is_mptcp(const struct sock *sk) +@@ -188,6 +186,9 @@ void mptcp_seq_show(struct seq_file *seq); + int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb); ++struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, ++ struct sock *sk_listener, ++ bool attach_listener); + + __be32 mptcp_get_reset_option(const struct sk_buff *skb); + +@@ -274,6 +275,13 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req, + return 0; /* TCP fallback */ + } + ++static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, ++ struct sock *sk_listener, ++ bool attach_listener) ++{ ++ return NULL; ++} ++ + static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); } + #endif /* CONFIG_MPTCP */ + +diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h +index 229e8fae66a34..ced95fec3367d 100644 +--- a/include/trace/events/ext4.h ++++ b/include/trace/events/ext4.h +@@ -104,6 +104,7 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE); + TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR); + TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE); + TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA); ++TRACE_DEFINE_ENUM(EXT4_FC_REASON_ENCRYPTED_FILENAME); + TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); + + #define show_fc_reason(reason) \ +@@ -116,7 +117,8 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); + { EXT4_FC_REASON_RESIZE, "RESIZE"}, \ + { EXT4_FC_REASON_RENAME_DIR, "RENAME_DIR"}, \ + { EXT4_FC_REASON_FALLOC_RANGE, "FALLOC_RANGE"}, \ +- { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}) ++ { EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \ ++ { EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"}) + + TRACE_EVENT(ext4_other_inode_update_time, + TP_PROTO(struct inode *inode, ino_t orig_ino), +@@ -2764,7 +2766,7 @@ TRACE_EVENT(ext4_fc_stats, + ), + + TP_printk("dev %d,%d fc ineligible reasons:\n" +- "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u " ++ "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u" + "num_commits:%lu, ineligible: %lu, numblks: %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), +@@ -2776,6 +2778,7 @@ TRACE_EVENT(ext4_fc_stats, + FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), ++ FC_REASON_NAME_STAT(EXT4_FC_REASON_ENCRYPTED_FILENAME), + __entry->fc_commits, __entry->fc_ineligible_commits, + __entry->fc_numblks) + ); +diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h +index 99f783c384bb4..8f5ee380d3093 100644 +--- a/include/trace/events/jbd2.h ++++ b/include/trace/events/jbd2.h +@@ -40,7 +40,7 @@ DECLARE_EVENT_CLASS(jbd2_commit, + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) +- __field( int, transaction ) ++ __field( tid_t, transaction ) + ), + + TP_fast_assign( +@@ -49,7 +49,7 @@ DECLARE_EVENT_CLASS(jbd2_commit, + __entry->transaction = commit_transaction->t_tid; + ), + +- TP_printk("dev %d,%d transaction %d sync %d", ++ TP_printk("dev %d,%d 
transaction %u sync %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit) + ); +@@ -97,8 +97,8 @@ TRACE_EVENT(jbd2_end_commit, + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( char, sync_commit ) +- __field( int, transaction ) +- __field( int, head ) ++ __field( tid_t, transaction ) ++ __field( tid_t, head ) + ), + + TP_fast_assign( +@@ -108,7 +108,7 @@ TRACE_EVENT(jbd2_end_commit, + __entry->head = journal->j_tail_sequence; + ), + +- TP_printk("dev %d,%d transaction %d sync %d head %d", ++ TP_printk("dev %d,%d transaction %u sync %d head %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->transaction, __entry->sync_commit, __entry->head) + ); +@@ -134,14 +134,14 @@ TRACE_EVENT(jbd2_submit_inode_data, + ); + + DECLARE_EVENT_CLASS(jbd2_handle_start_class, +- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, ++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type, + unsigned int line_no, int requested_blocks), + + TP_ARGS(dev, tid, type, line_no, requested_blocks), + + TP_STRUCT__entry( + __field( dev_t, dev ) +- __field( unsigned long, tid ) ++ __field( tid_t, tid ) + __field( unsigned int, type ) + __field( unsigned int, line_no ) + __field( int, requested_blocks) +@@ -155,28 +155,28 @@ DECLARE_EVENT_CLASS(jbd2_handle_start_class, + __entry->requested_blocks = requested_blocks; + ), + +- TP_printk("dev %d,%d tid %lu type %u line_no %u " ++ TP_printk("dev %d,%d tid %u type %u line_no %u " + "requested_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->requested_blocks) + ); + + DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start, +- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, ++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type, + unsigned int line_no, int requested_blocks), + + TP_ARGS(dev, tid, type, line_no, requested_blocks) + ); + + DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart, +- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, ++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type, + unsigned int line_no, int requested_blocks), + + TP_ARGS(dev, tid, type, line_no, requested_blocks) + ); + + TRACE_EVENT(jbd2_handle_extend, +- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, ++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type, + unsigned int line_no, int buffer_credits, + int requested_blocks), + +@@ -184,7 +184,7 @@ TRACE_EVENT(jbd2_handle_extend, + + TP_STRUCT__entry( + __field( dev_t, dev ) +- __field( unsigned long, tid ) ++ __field( tid_t, tid ) + __field( unsigned int, type ) + __field( unsigned int, line_no ) + __field( int, buffer_credits ) +@@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_handle_extend, + __entry->requested_blocks = requested_blocks; + ), + +- TP_printk("dev %d,%d tid %lu type %u line_no %u " ++ TP_printk("dev %d,%d tid %u type %u line_no %u " + "buffer_credits %d requested_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->buffer_credits, +@@ -208,7 +208,7 @@ TRACE_EVENT(jbd2_handle_extend, + ); + + TRACE_EVENT(jbd2_handle_stats, +- TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, ++ TP_PROTO(dev_t dev, tid_t tid, unsigned int type, + unsigned int line_no, int interval, int sync, + int requested_blocks, int dirtied_blocks), + +@@ -217,7 +217,7 @@ TRACE_EVENT(jbd2_handle_stats, + + TP_STRUCT__entry( + __field( dev_t, dev ) +- __field( unsigned long, tid ) ++ __field( tid_t, tid ) + __field( unsigned int, type ) + __field( unsigned int, 
line_no ) + __field( int, interval ) +@@ -237,7 +237,7 @@ TRACE_EVENT(jbd2_handle_stats, + __entry->dirtied_blocks = dirtied_blocks; + ), + +- TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " ++ TP_printk("dev %d,%d tid %u type %u line_no %u interval %d " + "sync %d requested_blocks %d dirtied_blocks %d", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->interval, +@@ -246,14 +246,14 @@ TRACE_EVENT(jbd2_handle_stats, + ); + + TRACE_EVENT(jbd2_run_stats, +- TP_PROTO(dev_t dev, unsigned long tid, ++ TP_PROTO(dev_t dev, tid_t tid, + struct transaction_run_stats_s *stats), + + TP_ARGS(dev, tid, stats), + + TP_STRUCT__entry( + __field( dev_t, dev ) +- __field( unsigned long, tid ) ++ __field( tid_t, tid ) + __field( unsigned long, wait ) + __field( unsigned long, request_delay ) + __field( unsigned long, running ) +@@ -279,7 +279,7 @@ TRACE_EVENT(jbd2_run_stats, + __entry->blocks_logged = stats->rs_blocks_logged; + ), + +- TP_printk("dev %d,%d tid %lu wait %u request_delay %u running %u " ++ TP_printk("dev %d,%d tid %u wait %u request_delay %u running %u " + "locked %u flushing %u logging %u handle_count %u " + "blocks %u blocks_logged %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, +@@ -294,14 +294,14 @@ TRACE_EVENT(jbd2_run_stats, + ); + + TRACE_EVENT(jbd2_checkpoint_stats, +- TP_PROTO(dev_t dev, unsigned long tid, ++ TP_PROTO(dev_t dev, tid_t tid, + struct transaction_chp_stats_s *stats), + + TP_ARGS(dev, tid, stats), + + TP_STRUCT__entry( + __field( dev_t, dev ) +- __field( unsigned long, tid ) ++ __field( tid_t, tid ) + __field( unsigned long, chp_time ) + __field( __u32, forced_to_close ) + __field( __u32, written ) +@@ -317,7 +317,7 @@ TRACE_EVENT(jbd2_checkpoint_stats, + __entry->dropped = stats->cs_dropped; + ), + +- TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " ++ TP_printk("dev %d,%d tid %u chp_time %u forced_to_close %u " + "written %u dropped %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + jiffies_to_msecs(__entry->chp_time), +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index 25a54e04560e5..17ab3e15ac25f 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -2088,6 +2088,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx, + bool bpf_prog_map_compatible(struct bpf_map *map, + const struct bpf_prog *fp) + { ++ enum bpf_prog_type prog_type = resolve_prog_type(fp); + bool ret; + + if (fp->kprobe_override) +@@ -2098,12 +2099,12 @@ bool bpf_prog_map_compatible(struct bpf_map *map, + /* There's no owner yet where we could check for + * compatibility. + */ +- map->owner.type = fp->type; ++ map->owner.type = prog_type; + map->owner.jited = fp->jited; + map->owner.xdp_has_frags = fp->aux->xdp_has_frags; + ret = true; + } else { +- ret = map->owner.type == fp->type && ++ ret = map->owner.type == prog_type && + map->owner.jited == fp->jited && + map->owner.xdp_has_frags == fp->aux->xdp_has_frags; + } +diff --git a/kernel/events/core.c b/kernel/events/core.c +index 732b392fc5c63..3b9e86108f435 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -12231,12 +12231,12 @@ SYSCALL_DEFINE5(perf_event_open, + if (flags & ~PERF_FLAG_ALL) + return -EINVAL; + +- /* Do we allow access to perf_event_open(2) ? */ +- err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); ++ err = perf_copy_attr(attr_uptr, &attr); + if (err) + return err; + +- err = perf_copy_attr(attr_uptr, &attr); ++ /* Do we allow access to perf_event_open(2) ? 
*/ ++ err = security_perf_event_open(&attr, PERF_SECURITY_OPEN); + if (err) + return err; + +diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig +index e9e95c790b8ee..93d7249962833 100644 +--- a/kernel/trace/Kconfig ++++ b/kernel/trace/Kconfig +@@ -375,6 +375,7 @@ config SCHED_TRACER + config HWLAT_TRACER + bool "Tracer to detect hardware latencies (like SMIs)" + select GENERIC_TRACER ++ select TRACER_MAX_TRACE + help + This tracer, when enabled will create one or more kernel threads, + depending on what the cpumask file is set to, which each thread +@@ -410,6 +411,7 @@ config HWLAT_TRACER + config OSNOISE_TRACER + bool "OS Noise tracer" + select GENERIC_TRACER ++ select TRACER_MAX_TRACE + help + In the context of high-performance computing (HPC), the Operating + System Noise (osnoise) refers to the interference experienced by an +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 5cfc95a52bc37..3076af8dbf32e 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1421,6 +1421,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr) + return false; + } + EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); ++#define free_snapshot(tr) do { } while (0) + #endif /* CONFIG_TRACER_SNAPSHOT */ + + void tracer_tracing_off(struct trace_array *tr) +@@ -1692,6 +1693,8 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) + } + + unsigned long __read_mostly tracing_thresh; ++ ++#ifdef CONFIG_TRACER_MAX_TRACE + static const struct file_operations tracing_max_lat_fops; + + #ifdef LATENCY_FS_NOTIFY +@@ -1748,18 +1751,14 @@ void latency_fsnotify(struct trace_array *tr) + irq_work_queue(&tr->fsnotify_irqwork); + } + +-#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ +- || defined(CONFIG_OSNOISE_TRACER) ++#else /* !LATENCY_FS_NOTIFY */ + + #define trace_create_maxlat_file(tr, d_tracer) \ + trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ + d_tracer, &tr->max_latency, &tracing_max_lat_fops) + +-#else +-#define trace_create_maxlat_file(tr, d_tracer) do { } while (0) + #endif + +-#ifdef CONFIG_TRACER_MAX_TRACE + /* + * Copy the new maximum trace into the separate maximum-trace + * structure. 
(this way the maximum trace is permanently saved, +@@ -1834,14 +1833,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, + ring_buffer_record_off(tr->max_buffer.buffer); + + #ifdef CONFIG_TRACER_SNAPSHOT +- if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) +- goto out_unlock; ++ if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { ++ arch_spin_unlock(&tr->max_lock); ++ return; ++ } + #endif + swap(tr->array_buffer.buffer, tr->max_buffer.buffer); + + __update_max_tr(tr, tsk, cpu); + +- out_unlock: + arch_spin_unlock(&tr->max_lock); + } + +@@ -1888,6 +1888,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) + __update_max_tr(tr, tsk, cpu); + arch_spin_unlock(&tr->max_lock); + } ++ + #endif /* CONFIG_TRACER_MAX_TRACE */ + + static int wait_on_pipe(struct trace_iterator *iter, int full) +@@ -6572,7 +6573,7 @@ out: + return ret; + } + +-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) ++#ifdef CONFIG_TRACER_MAX_TRACE + + static ssize_t + tracing_max_lat_read(struct file *filp, char __user *ubuf, +@@ -6796,7 +6797,20 @@ waitagain: + + ret = print_trace_line(iter); + if (ret == TRACE_TYPE_PARTIAL_LINE) { +- /* don't print partial lines */ ++ /* ++ * If one print_trace_line() fills entire trace_seq in one shot, ++ * trace_seq_to_user() will returns -EBUSY because save_len == 0, ++ * In this case, we need to consume it, otherwise, loop will peek ++ * this event next time, resulting in an infinite loop. ++ */ ++ if (save_len == 0) { ++ iter->seq.full = 0; ++ trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); ++ trace_consume(iter); ++ break; ++ } ++ ++ /* In other cases, don't print partial lines */ + iter->seq.seq.len = save_len; + break; + } +@@ -7587,7 +7601,7 @@ static const struct file_operations tracing_thresh_fops = { + .llseek = generic_file_llseek, + }; + +-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) ++#ifdef CONFIG_TRACER_MAX_TRACE + static const struct file_operations tracing_max_lat_fops = { + .open = tracing_open_generic, + .read = tracing_max_lat_read, +@@ -9601,7 +9615,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) + + create_trace_options_dir(tr); + ++#ifdef CONFIG_TRACER_MAX_TRACE + trace_create_maxlat_file(tr, d_tracer); ++#endif + + if (ftrace_create_function_files(tr, d_tracer)) + MEM_FAIL(1, "Could not allocate function filter files"); +diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h +index d42e245071525..5581754d97628 100644 +--- a/kernel/trace/trace.h ++++ b/kernel/trace/trace.h +@@ -308,8 +308,7 @@ struct trace_array { + struct array_buffer max_buffer; + bool allocated_snapshot; + #endif +-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ +- || defined(CONFIG_OSNOISE_TRACER) ++#ifdef CONFIG_TRACER_MAX_TRACE + unsigned long max_latency; + #ifdef CONFIG_FSNOTIFY + struct dentry *d_max_latency; +@@ -688,12 +687,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, + void *cond_data); + void update_max_tr_single(struct trace_array *tr, + struct task_struct *tsk, int cpu); +-#endif /* CONFIG_TRACER_MAX_TRACE */ + +-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \ +- || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY) ++#ifdef CONFIG_FSNOTIFY + #define LATENCY_FS_NOTIFY + #endif ++#endif /* CONFIG_TRACER_MAX_TRACE */ + + #ifdef LATENCY_FS_NOTIFY + void latency_fsnotify(struct trace_array *tr); +@@ -1956,17 +1954,30 @@ static 
__always_inline void trace_iterator_reset(struct trace_iterator *iter) + } + + /* Check the name is good for event/group/fields */ +-static inline bool is_good_name(const char *name) ++static inline bool __is_good_name(const char *name, bool hash_ok) + { +- if (!isalpha(*name) && *name != '_') ++ if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-')) + return false; + while (*++name != '\0') { +- if (!isalpha(*name) && !isdigit(*name) && *name != '_') ++ if (!isalpha(*name) && !isdigit(*name) && *name != '_' && ++ (!hash_ok || *name != '-')) + return false; + } + return true; + } + ++/* Check the name is good for event/group/fields */ ++static inline bool is_good_name(const char *name) ++{ ++ return __is_good_name(name, false); ++} ++ ++/* Check the name is good for system */ ++static inline bool is_good_system_name(const char *name) ++{ ++ return __is_good_name(name, true); ++} ++ + /* Convert certain expected symbols into '_' when generating event names */ + static inline void sanitize_event_name(char *name) + { +diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c +index 352b65e2b9105..753fc536525d3 100644 +--- a/kernel/trace/trace_eprobe.c ++++ b/kernel/trace/trace_eprobe.c +@@ -564,6 +564,9 @@ static void eprobe_trigger_func(struct event_trigger_data *data, + { + struct eprobe_data *edata = data->private_data; + ++ if (unlikely(!rec)) ++ return; ++ + if (unlikely(!rec)) + return; + +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index b6e5724a9ea35..c6e406995c112 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -617,7 +617,7 @@ struct action_data { + * event param, and is passed to the synthetic event + * invocation. + */ +- unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; ++ unsigned int var_ref_idx[SYNTH_FIELDS_MAX]; + struct synth_event *synth_event; + bool use_trace_keyword; + char *synth_event_name; +@@ -2173,7 +2173,9 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, + return ref_field; + } + } +- ++ /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */ ++ if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX) ++ return NULL; + ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); + if (ref_field) { + if (init_var_ref(ref_field, var_field, system, event_name)) { +@@ -3586,6 +3588,7 @@ static int parse_action_params(struct trace_array *tr, char *params, + while (params) { + if (data->n_params >= SYNTH_FIELDS_MAX) { + hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); ++ ret = -EINVAL; + goto out; + } + +@@ -3922,6 +3925,10 @@ static int trace_action_create(struct hist_trigger_data *hist_data, + + lockdep_assert_held(&event_mutex); + ++ /* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */ ++ if (data->n_params > SYNTH_FIELDS_MAX) ++ return -EINVAL; ++ + if (data->use_trace_keyword) + synth_event_name = data->synth_event_name; + else +diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c +index c3b582d19b620..67592eed0be8d 100644 +--- a/kernel/trace/trace_events_synth.c ++++ b/kernel/trace/trace_events_synth.c +@@ -1282,12 +1282,12 @@ static int __create_synth_event(const char *name, const char *raw_fields) + goto err_free_arg; + } + +- fields[n_fields++] = field; + if (n_fields == SYNTH_FIELDS_MAX) { + synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0); + ret = -EINVAL; + goto err_free_arg; + } ++ fields[n_fields++] = field; + + n_fields_this_loop++; + } +diff --git 
a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c +index 36dff277de464..bb2f95d7175c2 100644 +--- a/kernel/trace/trace_probe.c ++++ b/kernel/trace/trace_probe.c +@@ -246,7 +246,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup, + return -EINVAL; + } + strlcpy(buf, event, slash - event + 1); +- if (!is_good_name(buf)) { ++ if (!is_good_system_name(buf)) { + trace_probe_log_err(offset, BAD_GROUP_NAME); + return -EINVAL; + } +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug +index 3638b3424be53..12dfe6691dd52 100644 +--- a/lib/Kconfig.debug ++++ b/lib/Kconfig.debug +@@ -2092,6 +2092,7 @@ config TEST_MIN_HEAP + config TEST_SORT + tristate "Array-based sort test" if !KUNIT_ALL_TESTS + depends on KUNIT ++ select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE + default KUNIT_ALL_TESTS + help + This option enables the self-test function of 'sort()' at boot, +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index e36ca75311a5c..9c251faeb6f59 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -255,6 +255,152 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) + return subpool_inode(file_inode(vma->vm_file)); + } + ++/* ++ * hugetlb vma_lock helper routines ++ */ ++static bool __vma_shareable_lock(struct vm_area_struct *vma) ++{ ++ return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) && ++ vma->vm_private_data; ++} ++ ++void hugetlb_vma_lock_read(struct vm_area_struct *vma) ++{ ++ if (__vma_shareable_lock(vma)) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ down_read(&vma_lock->rw_sema); ++ } ++} ++ ++void hugetlb_vma_unlock_read(struct vm_area_struct *vma) ++{ ++ if (__vma_shareable_lock(vma)) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ up_read(&vma_lock->rw_sema); ++ } ++} ++ ++void hugetlb_vma_lock_write(struct vm_area_struct *vma) ++{ ++ if (__vma_shareable_lock(vma)) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ down_write(&vma_lock->rw_sema); ++ } ++} ++ ++void hugetlb_vma_unlock_write(struct vm_area_struct *vma) ++{ ++ if (__vma_shareable_lock(vma)) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ up_write(&vma_lock->rw_sema); ++ } ++} ++ ++int hugetlb_vma_trylock_write(struct vm_area_struct *vma) ++{ ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ if (!__vma_shareable_lock(vma)) ++ return 1; ++ ++ return down_write_trylock(&vma_lock->rw_sema); ++} ++ ++void hugetlb_vma_assert_locked(struct vm_area_struct *vma) ++{ ++ if (__vma_shareable_lock(vma)) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ lockdep_assert_held(&vma_lock->rw_sema); ++ } ++} ++ ++void hugetlb_vma_lock_release(struct kref *kref) ++{ ++ struct hugetlb_vma_lock *vma_lock = container_of(kref, ++ struct hugetlb_vma_lock, refs); ++ ++ kfree(vma_lock); ++} ++ ++static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) ++{ ++ struct vm_area_struct *vma = vma_lock->vma; ++ ++ /* ++ * vma_lock structure may or not be released as a result of put, ++ * it certainly will no longer be attached to vma so clear pointer. ++ * Semaphore synchronizes access to vma_lock->vma field. 
++ */ ++ vma_lock->vma = NULL; ++ vma->vm_private_data = NULL; ++ up_write(&vma_lock->rw_sema); ++ kref_put(&vma_lock->refs, hugetlb_vma_lock_release); ++} ++ ++static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) ++{ ++ if (__vma_shareable_lock(vma)) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ __hugetlb_vma_unlock_write_put(vma_lock); ++ } ++} ++ ++static void hugetlb_vma_lock_free(struct vm_area_struct *vma) ++{ ++ /* ++ * Only present in sharable vmas. ++ */ ++ if (!vma || !__vma_shareable_lock(vma)) ++ return; ++ ++ if (vma->vm_private_data) { ++ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; ++ ++ down_write(&vma_lock->rw_sema); ++ __hugetlb_vma_unlock_write_put(vma_lock); ++ } ++} ++ ++static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) ++{ ++ struct hugetlb_vma_lock *vma_lock; ++ ++ /* Only establish in (flags) sharable vmas */ ++ if (!vma || !(vma->vm_flags & VM_MAYSHARE)) ++ return; ++ ++ /* Should never get here with non-NULL vm_private_data */ ++ if (vma->vm_private_data) ++ return; ++ ++ vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); ++ if (!vma_lock) { ++ /* ++ * If we can not allocate structure, then vma can not ++ * participate in pmd sharing. This is only a possible ++ * performance enhancement and memory saving issue. ++ * However, the lock is also used to synchronize page ++ * faults with truncation. If the lock is not present, ++ * unlikely races could leave pages in a file past i_size ++ * until the file is removed. Warn in the unlikely case of ++ * allocation failure. ++ */ ++ pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); ++ return; ++ } ++ ++ kref_init(&vma_lock->refs); ++ init_rwsem(&vma_lock->rw_sema); ++ vma_lock->vma = vma; ++ vma->vm_private_data = vma_lock; ++} ++ + /* Helper that removes a struct file_region from the resv_map cache and returns + * it for use. 
+ */ +@@ -6557,7 +6703,8 @@ bool hugetlb_reserve_pages(struct inode *inode, + } + + /* +- * vma specific semaphore used for pmd sharing synchronization ++ * vma specific semaphore used for pmd sharing and fault/truncation ++ * synchronization + */ + hugetlb_vma_lock_alloc(vma); + +@@ -6813,149 +6960,6 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, + *end = ALIGN(*end, PUD_SIZE); + } + +-static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma) +-{ +- return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) && +- vma->vm_private_data; +-} +- +-void hugetlb_vma_lock_read(struct vm_area_struct *vma) +-{ +- if (__vma_shareable_flags_pmd(vma)) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- down_read(&vma_lock->rw_sema); +- } +-} +- +-void hugetlb_vma_unlock_read(struct vm_area_struct *vma) +-{ +- if (__vma_shareable_flags_pmd(vma)) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- up_read(&vma_lock->rw_sema); +- } +-} +- +-void hugetlb_vma_lock_write(struct vm_area_struct *vma) +-{ +- if (__vma_shareable_flags_pmd(vma)) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- down_write(&vma_lock->rw_sema); +- } +-} +- +-void hugetlb_vma_unlock_write(struct vm_area_struct *vma) +-{ +- if (__vma_shareable_flags_pmd(vma)) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- up_write(&vma_lock->rw_sema); +- } +-} +- +-int hugetlb_vma_trylock_write(struct vm_area_struct *vma) +-{ +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- if (!__vma_shareable_flags_pmd(vma)) +- return 1; +- +- return down_write_trylock(&vma_lock->rw_sema); +-} +- +-void hugetlb_vma_assert_locked(struct vm_area_struct *vma) +-{ +- if (__vma_shareable_flags_pmd(vma)) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- lockdep_assert_held(&vma_lock->rw_sema); +- } +-} +- +-void hugetlb_vma_lock_release(struct kref *kref) +-{ +- struct hugetlb_vma_lock *vma_lock = container_of(kref, +- struct hugetlb_vma_lock, refs); +- +- kfree(vma_lock); +-} +- +-static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) +-{ +- struct vm_area_struct *vma = vma_lock->vma; +- +- /* +- * vma_lock structure may or not be released as a result of put, +- * it certainly will no longer be attached to vma so clear pointer. +- * Semaphore synchronizes access to vma_lock->vma field. +- */ +- vma_lock->vma = NULL; +- vma->vm_private_data = NULL; +- up_write(&vma_lock->rw_sema); +- kref_put(&vma_lock->refs, hugetlb_vma_lock_release); +-} +- +-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) +-{ +- if (__vma_shareable_flags_pmd(vma)) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- __hugetlb_vma_unlock_write_put(vma_lock); +- } +-} +- +-static void hugetlb_vma_lock_free(struct vm_area_struct *vma) +-{ +- /* +- * Only present in sharable vmas. 
+- */ +- if (!vma || !__vma_shareable_flags_pmd(vma)) +- return; +- +- if (vma->vm_private_data) { +- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; +- +- down_write(&vma_lock->rw_sema); +- __hugetlb_vma_unlock_write_put(vma_lock); +- } +-} +- +-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) +-{ +- struct hugetlb_vma_lock *vma_lock; +- +- /* Only establish in (flags) sharable vmas */ +- if (!vma || !(vma->vm_flags & VM_MAYSHARE)) +- return; +- +- /* Should never get here with non-NULL vm_private_data */ +- if (vma->vm_private_data) +- return; +- +- vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); +- if (!vma_lock) { +- /* +- * If we can not allocate structure, then vma can not +- * participate in pmd sharing. This is only a possible +- * performance enhancement and memory saving issue. +- * However, the lock is also used to synchronize page +- * faults with truncation. If the lock is not present, +- * unlikely races could leave pages in a file past i_size +- * until the file is removed. Warn in the unlikely case of +- * allocation failure. +- */ +- pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); +- return; +- } +- +- kref_init(&vma_lock->refs); +- init_rwsem(&vma_lock->rw_sema); +- vma_lock->vma = vma; +- vma->vm_private_data = vma_lock; +-} +- + /* + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. While this is not necessary for the +@@ -7044,47 +7048,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + + #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ + +-void hugetlb_vma_lock_read(struct vm_area_struct *vma) +-{ +-} +- +-void hugetlb_vma_unlock_read(struct vm_area_struct *vma) +-{ +-} +- +-void hugetlb_vma_lock_write(struct vm_area_struct *vma) +-{ +-} +- +-void hugetlb_vma_unlock_write(struct vm_area_struct *vma) +-{ +-} +- +-int hugetlb_vma_trylock_write(struct vm_area_struct *vma) +-{ +- return 1; +-} +- +-void hugetlb_vma_assert_locked(struct vm_area_struct *vma) +-{ +-} +- +-void hugetlb_vma_lock_release(struct kref *kref) +-{ +-} +- +-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) +-{ +-} +- +-static void hugetlb_vma_lock_free(struct vm_area_struct *vma) +-{ +-} +- +-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) +-{ +-} +- + pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, pud_t *pud) + { +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c +index 942d2dfa11151..26fb97d1d4d9a 100644 +--- a/net/ipv4/syncookies.c ++++ b/net/ipv4/syncookies.c +@@ -288,12 +288,11 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + struct tcp_request_sock *treq; + struct request_sock *req; + +-#ifdef CONFIG_MPTCP + if (sk_is_mptcp(sk)) +- ops = &mptcp_subflow_request_sock_ops; +-#endif ++ req = mptcp_subflow_reqsk_alloc(ops, sk, false); ++ else ++ req = inet_reqsk_alloc(ops, sk, false); + +- req = inet_reqsk_alloc(ops, sk, false); + if (!req) + return NULL; + +diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c +index 9e82250cbb703..0430415357ba3 100644 +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -156,6 +156,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) + + if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { + GENL_SET_ERR_MSG(info, "invalid addr id or flags"); ++ err = -EINVAL; + goto announce_err; + } + +@@ -282,6 +283,7 @@ int 
mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info) + + if (addr_l.id == 0) { + NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id"); ++ err = -EINVAL; + goto create_err; + } + +@@ -395,11 +397,13 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info) + + if (addr_l.family != addr_r.family) { + GENL_SET_ERR_MSG(info, "address families do not match"); ++ err = -EINVAL; + goto destroy_err; + } + + if (!addr_l.port || !addr_r.port) { + GENL_SET_ERR_MSG(info, "missing local or remote port"); ++ err = -EINVAL; + goto destroy_err; + } + +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 2159b5f9988f8..613f515fedf0a 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -45,7 +45,6 @@ static void subflow_req_destructor(struct request_sock *req) + sock_put((struct sock *)subflow_req->msk); + + mptcp_token_destroy_request(req); +- tcp_request_sock_ops.destructor(req); + } + + static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, +@@ -529,7 +528,7 @@ static int subflow_v6_rebuild_header(struct sock *sk) + } + #endif + +-struct request_sock_ops mptcp_subflow_request_sock_ops; ++static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init; + static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init; + + static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) +@@ -542,7 +541,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) + if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) + goto drop; + +- return tcp_conn_request(&mptcp_subflow_request_sock_ops, ++ return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops, + &subflow_request_sock_ipv4_ops, + sk, skb); + drop: +@@ -550,7 +549,14 @@ drop: + return 0; + } + ++static void subflow_v4_req_destructor(struct request_sock *req) ++{ ++ subflow_req_destructor(req); ++ tcp_request_sock_ops.destructor(req); ++} ++ + #if IS_ENABLED(CONFIG_MPTCP_IPV6) ++static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init; + static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; + static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; + static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init; +@@ -573,15 +579,36 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) + return 0; + } + +- return tcp_conn_request(&mptcp_subflow_request_sock_ops, ++ return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops, + &subflow_request_sock_ipv6_ops, sk, skb); + + drop: + tcp_listendrop(sk); + return 0; /* don't send reset */ + } ++ ++static void subflow_v6_req_destructor(struct request_sock *req) ++{ ++ subflow_req_destructor(req); ++ tcp6_request_sock_ops.destructor(req); ++} ++#endif ++ ++struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops, ++ struct sock *sk_listener, ++ bool attach_listener) ++{ ++ if (ops->family == AF_INET) ++ ops = &mptcp_subflow_v4_request_sock_ops; ++#if IS_ENABLED(CONFIG_MPTCP_IPV6) ++ else if (ops->family == AF_INET6) ++ ops = &mptcp_subflow_v6_request_sock_ops; + #endif + ++ return inet_reqsk_alloc(ops, sk_listener, attach_listener); ++} ++EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc); ++ + /* validate hmac received in third ACK */ + static bool subflow_hmac_valid(const struct request_sock *req, + const struct mptcp_options_received *mp_opt) +@@ -1904,7 +1931,6 @@ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = 
{ + static int subflow_ops_init(struct request_sock_ops *subflow_ops) + { + subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock); +- subflow_ops->slab_name = "request_sock_subflow"; + + subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name, + subflow_ops->obj_size, 0, +@@ -1914,16 +1940,17 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops) + if (!subflow_ops->slab) + return -ENOMEM; + +- subflow_ops->destructor = subflow_req_destructor; +- + return 0; + } + + void __init mptcp_subflow_init(void) + { +- mptcp_subflow_request_sock_ops = tcp_request_sock_ops; +- if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0) +- panic("MPTCP: failed to init subflow request sock ops\n"); ++ mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops; ++ mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4"; ++ mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor; ++ ++ if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0) ++ panic("MPTCP: failed to init subflow v4 request sock ops\n"); + + subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops; + subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req; +@@ -1938,6 +1965,20 @@ void __init mptcp_subflow_init(void) + tcp_prot_override.release_cb = tcp_release_cb_override; + + #if IS_ENABLED(CONFIG_MPTCP_IPV6) ++ /* In struct mptcp_subflow_request_sock, we assume the TCP request sock ++ * structures for v4 and v6 have the same size. It should not changed in ++ * the future but better to make sure to be warned if it is no longer ++ * the case. ++ */ ++ BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock)); ++ ++ mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops; ++ mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6"; ++ mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor; ++ ++ if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0) ++ panic("MPTCP: failed to init subflow v6 request sock ops\n"); ++ + subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops; + subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req; + +diff --git a/security/device_cgroup.c b/security/device_cgroup.c +index a9f8c63a96d1a..bef2b9285fb34 100644 +--- a/security/device_cgroup.c ++++ b/security/device_cgroup.c +@@ -82,6 +82,17 @@ free_and_exit: + return -ENOMEM; + } + ++static void dev_exceptions_move(struct list_head *dest, struct list_head *orig) ++{ ++ struct dev_exception_item *ex, *tmp; ++ ++ lockdep_assert_held(&devcgroup_mutex); ++ ++ list_for_each_entry_safe(ex, tmp, orig, list) { ++ list_move_tail(&ex->list, dest); ++ } ++} ++ + /* + * called under devcgroup_mutex + */ +@@ -604,11 +615,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, + int count, rc = 0; + struct dev_exception_item ex; + struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent); ++ struct dev_cgroup tmp_devcgrp; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + memset(&ex, 0, sizeof(ex)); ++ memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp)); + b = buffer; + + switch (*b) { +@@ -620,15 +633,27 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, + + if (!may_allow_all(parent)) + return -EPERM; +- dev_exception_clean(devcgroup); +- devcgroup->behavior = DEVCG_DEFAULT_ALLOW; +- if (!parent) ++ if (!parent) { ++ devcgroup->behavior = DEVCG_DEFAULT_ALLOW; ++ dev_exception_clean(devcgroup); + break; ++ } + ++ INIT_LIST_HEAD(&tmp_devcgrp.exceptions); ++ rc = 
dev_exceptions_copy(&tmp_devcgrp.exceptions, ++ &devcgroup->exceptions); ++ if (rc) ++ return rc; ++ dev_exception_clean(devcgroup); + rc = dev_exceptions_copy(&devcgroup->exceptions, + &parent->exceptions); +- if (rc) ++ if (rc) { ++ dev_exceptions_move(&devcgroup->exceptions, ++ &tmp_devcgrp.exceptions); + return rc; ++ } ++ devcgroup->behavior = DEVCG_DEFAULT_ALLOW; ++ dev_exception_clean(&tmp_devcgrp); + break; + case DEVCG_DENY: + if (css_has_online_children(&devcgroup->css)) +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig +index 7249f16257c72..39caeca474449 100644 +--- a/security/integrity/ima/Kconfig ++++ b/security/integrity/ima/Kconfig +@@ -112,7 +112,7 @@ choice + + config IMA_DEFAULT_HASH_SM3 + bool "SM3" +- depends on CRYPTO_SM3=y ++ depends on CRYPTO_SM3_GENERIC=y + endchoice + + config IMA_DEFAULT_HASH +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 040b03ddc1c77..4a207a3ef7ef3 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -542,8 +542,13 @@ static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf, + + rc = ima_collect_measurement(&tmp_iint, file, NULL, 0, + ima_hash_algo, NULL); +- if (rc < 0) ++ if (rc < 0) { ++ /* ima_hash could be allocated in case of failure. */ ++ if (rc != -ENOMEM) ++ kfree(tmp_iint.ima_hash); ++ + return -EOPNOTSUPP; ++ } + + iint = &tmp_iint; + mutex_lock(&iint->mutex); +diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c +index 195ac18f09275..04c49f05cb74f 100644 +--- a/security/integrity/ima/ima_template.c ++++ b/security/integrity/ima/ima_template.c +@@ -340,8 +340,11 @@ static struct ima_template_desc *restore_template_fmt(char *template_name) + + template_desc->name = ""; + template_desc->fmt = kstrdup(template_name, GFP_KERNEL); +- if (!template_desc->fmt) ++ if (!template_desc->fmt) { ++ kfree(template_desc); ++ template_desc = NULL; + goto out; ++ } + + spin_lock(&template_list); + list_add_tail_rcu(&template_desc->list, &defined_templates); +diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c +index b78753d27d8ea..d1fdd113450a6 100644 +--- a/security/integrity/platform_certs/load_uefi.c ++++ b/security/integrity/platform_certs/load_uefi.c +@@ -35,6 +35,7 @@ static const struct dmi_system_id uefi_skip_cert[] = { + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") }, + { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") }, ++ { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMacPro1,1") }, + { } + }; + +diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c +index 754aa8ddd2e4f..0ba1fbcbb21e4 100644 +--- a/sound/pci/hda/patch_cs8409.c ++++ b/sound/pci/hda/patch_cs8409.c +@@ -888,7 +888,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42) + + /* Initialize CS42L42 companion codec */ + cs8409_i2c_bulk_write(cs42l42, cs42l42->init_seq, cs42l42->init_seq_num); +- usleep_range(20000, 25000); ++ usleep_range(30000, 35000); + + /* Clear interrupts, by reading interrupt status registers */ + cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs)); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index f5f640851fdcb..3794b522c2222 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -6903,6 +6903,34 @@ static void alc287_fixup_yoga9_14iap7_bass_spk_pin(struct hda_codec *codec, + } + } + ++static 
void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec, ++ const struct hda_fixup *fix, int action) ++{ ++ static const struct hda_pintbl pincfgs[] = { ++ { 0x14, 0x90170151 }, ++ { 0x17, 0x90170150 }, ++ { } ++ }; ++ static const hda_nid_t conn[] = { 0x02, 0x03 }; ++ static const hda_nid_t preferred_pairs[] = { ++ 0x14, 0x02, ++ 0x17, 0x03, ++ 0x21, 0x02, ++ 0 ++ }; ++ struct alc_spec *spec = codec->spec; ++ ++ alc_fixup_no_shutup(codec, fix, action); ++ ++ switch (action) { ++ case HDA_FIXUP_ACT_PRE_PROBE: ++ snd_hda_apply_pincfgs(codec, pincfgs); ++ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); ++ spec->gen.preferred_dacs = preferred_pairs; ++ break; ++ } ++} ++ + enum { + ALC269_FIXUP_GPIO2, + ALC269_FIXUP_SONY_VAIO, +@@ -7146,6 +7174,8 @@ enum { + ALC287_FIXUP_LEGION_16ITHG6, + ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK, + ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, ++ ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS, ++ ALC236_FIXUP_DELL_DUAL_CODECS, + }; + + /* A special fixup for Lenovo C940 and Yoga Duet 7; +@@ -9095,6 +9125,18 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK, + }, ++ [ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = alc295_fixup_dell_inspiron_top_speakers, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, ++ }, ++ [ALC236_FIXUP_DELL_DUAL_CODECS] = { ++ .type = HDA_FIXUP_PINS, ++ .v.func = alc1220_fixup_gb_dual_codecs, ++ .chained = true, ++ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ++ }, + }; + + static const struct snd_pci_quirk alc269_fixup_tbl[] = { +@@ -9195,6 +9237,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), + SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), ++ SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), ++ SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS), ++ SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), ++ SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS), ++ SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), ++ SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), ++ SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS), ++ SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS), + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), +diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c +index c4c1e89b47c1b..83cb81999c6fc 100644 +--- a/sound/soc/jz4740/jz4740-i2s.c ++++ b/sound/soc/jz4740/jz4740-i2s.c +@@ -55,7 +55,8 @@ + #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11) + #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10) + #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9) +-#define JZ_AIC_CTRL_FLUSH BIT(8) ++#define JZ_AIC_CTRL_TFLUSH BIT(8) ++#define JZ_AIC_CTRL_RFLUSH BIT(7) + #define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6) + #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5) + #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4) +@@ -90,6 +91,8 
@@ enum jz47xx_i2s_version { + struct i2s_soc_info { + enum jz47xx_i2s_version version; + struct snd_soc_dai_driver *dai; ++ ++ bool shared_fifo_flush; + }; + + struct jz4740_i2s { +@@ -116,19 +119,44 @@ static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s, + writel(value, i2s->base + reg); + } + ++static inline void jz4740_i2s_set_bits(const struct jz4740_i2s *i2s, ++ unsigned int reg, uint32_t bits) ++{ ++ uint32_t value = jz4740_i2s_read(i2s, reg); ++ value |= bits; ++ jz4740_i2s_write(i2s, reg, value); ++} ++ + static int jz4740_i2s_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) + { + struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); +- uint32_t conf, ctrl; ++ uint32_t conf; + int ret; + ++ /* ++ * When we can flush FIFOs independently, only flush the FIFO ++ * that is starting up. We can do this when the DAI is active ++ * because it does not disturb other active substreams. ++ */ ++ if (!i2s->soc_info->shared_fifo_flush) { ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH); ++ else ++ jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_RFLUSH); ++ } ++ + if (snd_soc_dai_active(dai)) + return 0; + +- ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); +- ctrl |= JZ_AIC_CTRL_FLUSH; +- jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); ++ /* ++ * When there is a shared flush bit for both FIFOs, the TFLUSH ++ * bit flushes both FIFOs. Flushing while the DAI is active would ++ * cause FIFO underruns in other active substreams so we have to ++ * guard this behind the snd_soc_dai_active() check. ++ */ ++ if (i2s->soc_info->shared_fifo_flush) ++ jz4740_i2s_set_bits(i2s, JZ_REG_AIC_CTRL, JZ_AIC_CTRL_TFLUSH); + + ret = clk_prepare_enable(i2s->clk_i2s); + if (ret) +@@ -443,6 +471,7 @@ static struct snd_soc_dai_driver jz4740_i2s_dai = { + static const struct i2s_soc_info jz4740_i2s_soc_info = { + .version = JZ_I2S_JZ4740, + .dai = &jz4740_i2s_dai, ++ .shared_fifo_flush = true, + }; + + static const struct i2s_soc_info jz4760_i2s_soc_info = { +diff --git a/sound/usb/card.h b/sound/usb/card.h +index 40061550105ac..6ec95b2edf863 100644 +--- a/sound/usb/card.h ++++ b/sound/usb/card.h +@@ -131,6 +131,7 @@ struct snd_usb_endpoint { + bool lowlatency_playback; /* low-latency playback mode */ + bool need_setup; /* (re-)need for hw_params? */ + bool need_prepare; /* (re-)need for prepare? */ ++ bool fixed_rate; /* skip rate setup */ + + /* for hw constraints */ + const struct audioformat *cur_audiofmt; +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c +index 4aaf0784940b5..419302e2057e8 100644 +--- a/sound/usb/endpoint.c ++++ b/sound/usb/endpoint.c +@@ -769,7 +769,8 @@ struct snd_usb_endpoint * + snd_usb_endpoint_open(struct snd_usb_audio *chip, + const struct audioformat *fp, + const struct snd_pcm_hw_params *params, +- bool is_sync_ep) ++ bool is_sync_ep, ++ bool fixed_rate) + { + struct snd_usb_endpoint *ep; + int ep_num = is_sync_ep ? 
fp->sync_ep : fp->endpoint; +@@ -825,6 +826,7 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip, + ep->implicit_fb_sync = fp->implicit_fb; + ep->need_setup = true; + ep->need_prepare = true; ++ ep->fixed_rate = fixed_rate; + + usb_audio_dbg(chip, " channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n", + ep->cur_channels, ep->cur_rate, +@@ -1413,11 +1415,13 @@ static int init_sample_rate(struct snd_usb_audio *chip, + if (clock && !clock->need_setup) + return 0; + +- err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, rate); +- if (err < 0) { +- if (clock) +- clock->rate = 0; /* reset rate */ +- return err; ++ if (!ep->fixed_rate) { ++ err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, rate); ++ if (err < 0) { ++ if (clock) ++ clock->rate = 0; /* reset rate */ ++ return err; ++ } + } + + if (clock) +diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h +index e67ea28faa54f..924f4351588ce 100644 +--- a/sound/usb/endpoint.h ++++ b/sound/usb/endpoint.h +@@ -14,7 +14,8 @@ struct snd_usb_endpoint * + snd_usb_endpoint_open(struct snd_usb_audio *chip, + const struct audioformat *fp, + const struct snd_pcm_hw_params *params, +- bool is_sync_ep); ++ bool is_sync_ep, ++ bool fixed_rate); + void snd_usb_endpoint_close(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep); + int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, +diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c +index f3e8484b3d9cb..41ac7185b42b6 100644 +--- a/sound/usb/implicit.c ++++ b/sound/usb/implicit.c +@@ -15,6 +15,7 @@ + #include "usbaudio.h" + #include "card.h" + #include "helper.h" ++#include "pcm.h" + #include "implicit.h" + + enum { +@@ -455,7 +456,8 @@ const struct audioformat * + snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip, + const struct audioformat *target, + const struct snd_pcm_hw_params *params, +- int stream) ++ int stream, ++ bool *fixed_rate) + { + struct snd_usb_substream *subs; + const struct audioformat *fp, *sync_fmt = NULL; +@@ -483,6 +485,8 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip, + } + } + ++ if (fixed_rate) ++ *fixed_rate = snd_usb_pcm_has_fixed_rate(subs); + return sync_fmt; + } + +diff --git a/sound/usb/implicit.h b/sound/usb/implicit.h +index ccb415a0ea860..7f1577b6c4d38 100644 +--- a/sound/usb/implicit.h ++++ b/sound/usb/implicit.h +@@ -9,6 +9,6 @@ const struct audioformat * + snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip, + const struct audioformat *target, + const struct snd_pcm_hw_params *params, +- int stream); ++ int stream, bool *fixed_rate); + + #endif /* __USBAUDIO_IMPLICIT_H */ +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index 9557bd4d1bbca..99a66d0ef5b26 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -157,6 +157,31 @@ find_substream_format(struct snd_usb_substream *subs, + true, subs); + } + ++bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs) ++{ ++ const struct audioformat *fp; ++ struct snd_usb_audio *chip = subs->stream->chip; ++ int rate = -1; ++ ++ if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE)) ++ return false; ++ list_for_each_entry(fp, &subs->fmt_list, list) { ++ if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) ++ return false; ++ if (fp->nr_rates < 1) ++ continue; ++ if (fp->nr_rates > 1) ++ return false; ++ if (rate < 0) { ++ rate = fp->rate_table[0]; ++ continue; ++ } ++ if (rate != fp->rate_table[0]) ++ return false; ++ } ++ return true; ++} ++ + static int init_pitch_v1(struct snd_usb_audio *chip, int ep) + { + struct usb_device *dev = 
chip->dev; +@@ -450,12 +475,14 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, + struct snd_usb_audio *chip = subs->stream->chip; + const struct audioformat *fmt; + const struct audioformat *sync_fmt; ++ bool fixed_rate, sync_fixed_rate; + int ret; + + ret = snd_media_start_pipeline(subs); + if (ret) + return ret; + ++ fixed_rate = snd_usb_pcm_has_fixed_rate(subs); + fmt = find_substream_format(subs, hw_params); + if (!fmt) { + usb_audio_dbg(chip, +@@ -469,7 +496,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, + if (fmt->implicit_fb) { + sync_fmt = snd_usb_find_implicit_fb_sync_format(chip, fmt, + hw_params, +- !substream->stream); ++ !substream->stream, ++ &sync_fixed_rate); + if (!sync_fmt) { + usb_audio_dbg(chip, + "cannot find sync format: ep=0x%x, iface=%d:%d, format=%s, rate=%d, channels=%d\n", +@@ -482,6 +510,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, + } + } else { + sync_fmt = fmt; ++ sync_fixed_rate = fixed_rate; + } + + ret = snd_usb_lock_shutdown(chip); +@@ -499,7 +528,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, + close_endpoints(chip, subs); + } + +- subs->data_endpoint = snd_usb_endpoint_open(chip, fmt, hw_params, false); ++ subs->data_endpoint = snd_usb_endpoint_open(chip, fmt, hw_params, false, fixed_rate); + if (!subs->data_endpoint) { + ret = -EINVAL; + goto unlock; +@@ -508,7 +537,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, + if (fmt->sync_ep) { + subs->sync_endpoint = snd_usb_endpoint_open(chip, sync_fmt, + hw_params, +- fmt == sync_fmt); ++ fmt == sync_fmt, ++ sync_fixed_rate); + if (!subs->sync_endpoint) { + ret = -EINVAL; + goto unlock; +diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h +index 493a4e34d78dc..388fe2ba346d6 100644 +--- a/sound/usb/pcm.h ++++ b/sound/usb/pcm.h +@@ -6,6 +6,8 @@ void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream); + int snd_usb_pcm_suspend(struct snd_usb_stream *as); + int snd_usb_pcm_resume(struct snd_usb_stream *as); + ++bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *as); ++ + int snd_usb_init_pitch(struct snd_usb_audio *chip, + const struct audioformat *fmt); + void snd_usb_preallocate_buffer(struct snd_usb_substream *subs); +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index 58b37bfc885cb..3d13fdf7590cd 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -2152,6 +2152,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { + QUIRK_FLAG_GENERIC_IMPLICIT_FB), + DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */ + QUIRK_FLAG_IFACE_SKIP_CLOSE), ++ DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */ ++ QUIRK_FLAG_FIXED_RATE), + + /* Vendor matches */ + VENDOR_FLG(0x045e, /* MS Lifecam */ +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h +index 2aba508a48312..f5a8dca66457f 100644 +--- a/sound/usb/usbaudio.h ++++ b/sound/usb/usbaudio.h +@@ -175,6 +175,9 @@ extern bool snd_usb_skip_validation; + * QUIRK_FLAG_FORCE_IFACE_RESET + * Force an interface reset whenever stopping & restarting a stream + * (e.g. after xrun) ++ * QUIRK_FLAG_FIXED_RATE ++ * Do not set PCM rate (frequency) when only one rate is available ++ * for the given endpoint. 
+ */ + + #define QUIRK_FLAG_GET_SAMPLE_RATE (1U << 0) +@@ -198,5 +201,6 @@ extern bool snd_usb_skip_validation; + #define QUIRK_FLAG_SKIP_IMPLICIT_FB (1U << 18) + #define QUIRK_FLAG_IFACE_SKIP_CLOSE (1U << 19) + #define QUIRK_FLAG_FORCE_IFACE_RESET (1U << 20) ++#define QUIRK_FLAG_FIXED_RATE (1U << 21) + + #endif /* __USBAUDIO_H */ +diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl +index 09d1578f9d66f..1737c59e4ff67 100755 +--- a/tools/testing/ktest/ktest.pl ++++ b/tools/testing/ktest/ktest.pl +@@ -1963,7 +1963,7 @@ sub run_scp_mod { + + sub _get_grub_index { + +- my ($command, $target, $skip) = @_; ++ my ($command, $target, $skip, $submenu) = @_; + + return if (defined($grub_number) && defined($last_grub_menu) && + $last_grub_menu eq $grub_menu && defined($last_machine) && +@@ -1980,11 +1980,16 @@ sub _get_grub_index { + + my $found = 0; + ++ my $submenu_number = 0; ++ + while () { + if (/$target/) { + $grub_number++; + $found = 1; + last; ++ } elsif (defined($submenu) && /$submenu/) { ++ $submenu_number++; ++ $grub_number = -1; + } elsif (/$skip/) { + $grub_number++; + } +@@ -1993,6 +1998,9 @@ sub _get_grub_index { + + dodie "Could not find '$grub_menu' through $command on $machine" + if (!$found); ++ if ($submenu_number > 0) { ++ $grub_number = "$submenu_number>$grub_number"; ++ } + doprint "$grub_number\n"; + $last_grub_menu = $grub_menu; + $last_machine = $machine; +@@ -2003,6 +2011,7 @@ sub get_grub_index { + my $command; + my $target; + my $skip; ++ my $submenu; + my $grub_menu_qt; + + if ($reboot_type !~ /^grub/) { +@@ -2017,8 +2026,9 @@ sub get_grub_index { + $skip = '^\s*title\s'; + } elsif ($reboot_type eq "grub2") { + $command = "cat $grub_file"; +- $target = '^menuentry.*' . $grub_menu_qt; +- $skip = '^menuentry\s|^submenu\s'; ++ $target = '^\s*menuentry.*' . $grub_menu_qt; ++ $skip = '^\s*menuentry'; ++ $submenu = '^\s*submenu\s'; + } elsif ($reboot_type eq "grub2bls") { + $command = $grub_bls_get; + $target = '^title=.*' . $grub_menu_qt; +@@ -2027,7 +2037,7 @@ sub get_grub_index { + return; + } + +- _get_grub_index($command, $target, $skip); ++ _get_grub_index($command, $target, $skip, $submenu); + } + + sub wait_for_input { +@@ -2090,7 +2100,7 @@ sub reboot_to { + if ($reboot_type eq "grub") { + run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; + } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) { +- run_ssh "$grub_reboot $grub_number"; ++ run_ssh "$grub_reboot \"'$grub_number'\""; + } elsif ($reboot_type eq "syslinux") { + run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path"; + } elsif (defined $reboot_script) { +@@ -3768,9 +3778,10 @@ sub test_this_config { + # .config to make sure it is missing the config that + # we had before + my %configs = %min_configs; +- delete $configs{$config}; ++ $configs{$config} = "# $config is not set"; + make_new_config ((values %configs), (values %keep_configs)); + make_oldconfig; ++ delete $configs{$config}; + undef %configs; + assign_configs \%configs, $output_config; + +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk +index a3ea3d4a206d0..291144c284fbc 100644 +--- a/tools/testing/selftests/lib.mk ++++ b/tools/testing/selftests/lib.mk +@@ -123,6 +123,11 @@ endef + clean: + $(CLEAN) + ++# Enables to extend CFLAGS and LDFLAGS from command line, e.g. 
++# make USERCFLAGS=-Werror USERLDFLAGS=-static ++CFLAGS += $(USERCFLAGS) ++LDFLAGS += $(USERLDFLAGS) ++ + # When make O= with kselftest target from main level + # the following aren't defined. + # diff --git a/sys-kernel/pinephone-sources/files/1500_XATTR_USER_PREFIX.patch b/sys-kernel/pinephone-sources/files/1500_XATTR_USER_PREFIX.patch index 245dcc2..fac3eed 100644 --- a/sys-kernel/pinephone-sources/files/1500_XATTR_USER_PREFIX.patch +++ b/sys-kernel/pinephone-sources/files/1500_XATTR_USER_PREFIX.patch @@ -13,11 +13,10 @@ The namespace is added to any user with Extended Attribute support enabled for tmpfs. Users who do not enable xattrs will not have the XATTR_PAX flags preserved. -diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h -index 1590c49..5eab462 100644 ---- a/include/uapi/linux/xattr.h -+++ b/include/uapi/linux/xattr.h -@@ -73,5 +73,9 @@ + +--- a/include/uapi/linux/xattr.h 2022-11-22 05:56:58.175733644 -0500 ++++ b/include/uapi/linux/xattr.h 2022-11-22 06:04:26.394834989 -0500 +@@ -81,5 +81,9 @@ #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT @@ -27,12 +26,12 @@ index 1590c49..5eab462 100644 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX #endif /* _UAPI_LINUX_XATTR_H */ ---- a/mm/shmem.c 2020-05-04 15:30:27.042035334 -0400 -+++ b/mm/shmem.c 2020-05-04 15:34:57.013881725 -0400 -@@ -3238,6 +3238,14 @@ static int shmem_xattr_handler_set(const +--- a/mm/shmem.c 2022-11-22 05:57:29.011626215 -0500 ++++ b/mm/shmem.c 2022-11-22 06:03:33.165939400 -0500 +@@ -3297,6 +3297,14 @@ static int shmem_xattr_handler_set(const struct shmem_inode_info *info = SHMEM_I(inode); + int err; - name = xattr_full_name(handler, name); + + if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) { + if (strcmp(name, XATTR_NAME_PAX_FLAGS)) @@ -41,10 +40,10 @@ index 1590c49..5eab462 100644 + return -EINVAL; + } + - return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); - } - -@@ -3253,6 +3261,12 @@ static const struct xattr_handler shmem_ + name = xattr_full_name(handler, name); + err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); + if (!err) { +@@ -3312,6 +3320,12 @@ static const struct xattr_handler shmem_ .set = shmem_xattr_handler_set, }; @@ -54,10 +53,10 @@ index 1590c49..5eab462 100644 + .set = shmem_xattr_handler_set, +}; + - static const struct xattr_handler *shmem_xattr_handlers[] = { - #ifdef CONFIG_TMPFS_POSIX_ACL - &posix_acl_access_xattr_handler, -@@ -3260,6 +3274,7 @@ static const struct xattr_handler *shmem + static const struct xattr_handler shmem_trusted_xattr_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .get = shmem_xattr_handler_get, +@@ -3325,6 +3339,7 @@ static const struct xattr_handler *shmem #endif &shmem_security_xattr_handler, &shmem_trusted_xattr_handler, diff --git a/sys-kernel/pinephone-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch b/sys-kernel/pinephone-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch deleted file mode 100644 index e8c3015..0000000 --- a/sys-kernel/pinephone-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch +++ /dev/null @@ -1,17 +0,0 @@ ---- a/fs/namei.c 2022-01-23 13:02:27.876558299 -0500 -+++ b/fs/namei.c 2022-03-06 12:47:39.375719693 -0500 -@@ -1020,10 +1020,10 @@ static inline void put_link(struct namei - path_put(&last->link); - } - --static int sysctl_protected_symlinks __read_mostly; --static int 
sysctl_protected_hardlinks __read_mostly; --static int sysctl_protected_fifos __read_mostly; --static int sysctl_protected_regular __read_mostly; -+static int sysctl_protected_symlinks __read_mostly = 1; -+static int sysctl_protected_hardlinks __read_mostly = 1; -+int sysctl_protected_fifos __read_mostly = 1; -+int sysctl_protected_regular __read_mostly = 1; - - #ifdef CONFIG_SYSCTL - static struct ctl_table namei_sysctls[] = { diff --git a/sys-kernel/pinephone-sources/files/2910_bfp-mark-get-entry-ip-as--maybe-unused.patch b/sys-kernel/pinephone-sources/files/2910_bfp-mark-get-entry-ip-as--maybe-unused.patch new file mode 100644 index 0000000..a75b90c --- /dev/null +++ b/sys-kernel/pinephone-sources/files/2910_bfp-mark-get-entry-ip-as--maybe-unused.patch @@ -0,0 +1,11 @@ +--- a/kernel/trace/bpf_trace.c 2022-11-09 13:30:24.192940988 -0500 ++++ b/kernel/trace/bpf_trace.c 2022-11-09 13:30:59.029810818 -0500 +@@ -1027,7 +1027,7 @@ static const struct bpf_func_proto bpf_g + }; + + #ifdef CONFIG_X86_KERNEL_IBT +-static unsigned long get_entry_ip(unsigned long fentry_ip) ++static unsigned long __maybe_unused get_entry_ip(unsigned long fentry_ip) + { + u32 instr; + diff --git a/sys-kernel/pinephone-sources/files/4567_distro-Gentoo-Kconfig.patch b/sys-kernel/pinephone-sources/files/4567_distro-Gentoo-Kconfig.patch index 0a38098..9e0701d 100644 --- a/sys-kernel/pinephone-sources/files/4567_distro-Gentoo-Kconfig.patch +++ b/sys-kernel/pinephone-sources/files/4567_distro-Gentoo-Kconfig.patch @@ -1,14 +1,14 @@ ---- a/Kconfig 2022-05-11 13:20:07.110347567 -0400 -+++ b/Kconfig 2022-05-11 13:21:12.127174393 -0400 +--- a/Kconfig 2022-08-25 10:11:47.220973785 -0400 ++++ b/Kconfig 2022-08-25 10:11:56.997682513 -0400 @@ -30,3 +30,5 @@ source "lib/Kconfig" source "lib/Kconfig.debug" source "Documentation/Kconfig" + +source "distro/Kconfig" ---- /dev/null 2022-05-10 13:47:17.750578524 -0400 -+++ b/distro/Kconfig 2022-05-11 13:21:20.540529032 -0400 -@@ -0,0 +1,290 @@ +--- /dev/null 2022-08-25 07:13:06.694086407 -0400 ++++ b/distro/Kconfig 2022-08-25 13:21:55.150660724 -0400 +@@ -0,0 +1,291 @@ +menu "Gentoo Linux" + +config GENTOO_LINUX @@ -185,7 +185,7 @@ +config GENTOO_KERNEL_SELF_PROTECTION_COMMON + bool "Enable Kernel Self Protection Project Recommendations" + -+ depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT ++ depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT && SECURITY && !ARCH_EPHEMERAL_INODES && RANDSTRUCT_PERFORMANCE + + select BUG + select STRICT_KERNEL_RWX @@ -202,6 +202,7 @@ + select HARDENED_USERCOPY if HAVE_HARDENED_USERCOPY_ALLOCATOR=y + select KFENCE if HAVE_ARCH_KFENCE && (!SLAB || SLUB) + select RANDOMIZE_KSTACK_OFFSET_DEFAULT if HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET && (INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION>=140000) ++ select SECURITY_LANDLOCK + select SCHED_CORE if SCHED_SMT + select BUG_ON_DATA_CORRUPTION + select SCHED_STACK_END_CHECK @@ -224,7 +225,7 @@ + select GCC_PLUGIN_LATENT_ENTROPY + select GCC_PLUGIN_STRUCTLEAK + select GCC_PLUGIN_STRUCTLEAK_BYREF_ALL -+ select GCC_PLUGIN_RANDSTRUCT ++ select GCC_PLUGIN_RANDSTRUCT + select 
GCC_PLUGIN_RANDSTRUCT_PERFORMANCE + select ZERO_CALL_USED_REGS if CC_HAS_ZERO_CALL_USED_REGS + @@ -239,12 +240,12 @@ + depends on !X86_MSR && X86_64 && GENTOO_KERNEL_SELF_PROTECTION + default n + ++ select GCC_PLUGIN_STACKLEAK ++ select LEGACY_VSYSCALL_NONE ++ select PAGE_TABLE_ISOLATION + select RANDOMIZE_BASE + select RANDOMIZE_MEMORY + select RELOCATABLE -+ select LEGACY_VSYSCALL_NONE -+ select PAGE_TABLE_ISOLATION -+ select GCC_PLUGIN_STACKLEAK + select VMAP_STACK + + diff --git a/sys-kernel/pinephone-sources/files/5.19.10-11.patch b/sys-kernel/pinephone-sources/files/5.19.10-11.patch deleted file mode 100644 index a5ff5cb..0000000 --- a/sys-kernel/pinephone-sources/files/5.19.10-11.patch +++ /dev/null @@ -1,1231 +0,0 @@ -diff --git a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml -index 85c85b694217c..e18107eafe7cc 100644 ---- a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml -+++ b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml -@@ -96,7 +96,7 @@ properties: - Documentation/devicetree/bindings/arm/cpus.yaml). - - required: -- - fiq-index -+ - apple,fiq-index - - cpus - - required: -diff --git a/Makefile b/Makefile -index 33a9b6b547c47..01463a22926d5 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 19 --SUBLEVEL = 10 -+SUBLEVEL = 11 - EXTRAVERSION = - NAME = Superb Owl - -diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig -index cd2b3fe156724..c68c3581483ac 100644 ---- a/arch/parisc/Kconfig -+++ b/arch/parisc/Kconfig -@@ -225,8 +225,18 @@ config MLONGCALLS - Enabling this option will probably slow down your kernel. - - config 64BIT -- def_bool "$(ARCH)" = "parisc64" -+ def_bool y if "$(ARCH)" = "parisc64" -+ bool "64-bit kernel" if "$(ARCH)" = "parisc" - depends on PA8X00 -+ help -+ Enable this if you want to support 64bit kernel on PA-RISC platform. -+ -+ At the moment, only people willing to use more than 2GB of RAM, -+ or having a 64bit-only capable PA-RISC machine should say Y here. -+ -+ Since there is no 64bit userland on PA-RISC, there is no point to -+ enable this option otherwise. The 64bit kernel is significantly bigger -+ and slower than the 32bit one. 
- - choice - prompt "Kernel page size" -diff --git a/block/blk-core.c b/block/blk-core.c -index 27fb1357ad4b8..cc6fbcb6d2521 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -338,7 +338,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) - - while (!blk_try_enter_queue(q, pm)) { - if (flags & BLK_MQ_REQ_NOWAIT) -- return -EBUSY; -+ return -EAGAIN; - - /* - * read pair of barrier in blk_freeze_queue_start(), we need to -@@ -368,7 +368,7 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio) - if (test_bit(GD_DEAD, &disk->state)) - goto dead; - bio_wouldblock_error(bio); -- return -EBUSY; -+ return -EAGAIN; - } - - /* -diff --git a/block/blk-lib.c b/block/blk-lib.c -index 09b7e1200c0f4..20e42144065b8 100644 ---- a/block/blk-lib.c -+++ b/block/blk-lib.c -@@ -311,6 +311,11 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, - struct blk_plug plug; - int ret = 0; - -+ /* make sure that "len << SECTOR_SHIFT" doesn't overflow */ -+ if (max_sectors > UINT_MAX >> SECTOR_SHIFT) -+ max_sectors = UINT_MAX >> SECTOR_SHIFT; -+ max_sectors &= ~bs_mask; -+ - if (max_sectors == 0) - return -EOPNOTSUPP; - if ((sector | nr_sects) & bs_mask) -@@ -324,10 +329,10 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, - - bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp); - bio->bi_iter.bi_sector = sector; -- bio->bi_iter.bi_size = len; -+ bio->bi_iter.bi_size = len << SECTOR_SHIFT; - -- sector += len << SECTOR_SHIFT; -- nr_sects -= len << SECTOR_SHIFT; -+ sector += len; -+ nr_sects -= len; - if (!nr_sects) { - ret = submit_bio_wait(bio); - bio_put(bio); -diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c -index a964e25ea6206..763256efddc2b 100644 ---- a/drivers/gpio/gpio-mpc8xxx.c -+++ b/drivers/gpio/gpio-mpc8xxx.c -@@ -172,6 +172,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) - - switch (flow_type) { - case IRQ_TYPE_EDGE_FALLING: -+ case IRQ_TYPE_LEVEL_LOW: - raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags); - gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR, - gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR) -diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c -index e342a6dc4c6c1..bb953f6478647 100644 ---- a/drivers/gpio/gpio-rockchip.c -+++ b/drivers/gpio/gpio-rockchip.c -@@ -418,11 +418,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) - goto out; - } else { - bank->toggle_edge_mode |= mask; -- level |= mask; -+ level &= ~mask; - - /* - * Determine gpio state. If 1 next interrupt should be -- * falling otherwise rising. -+ * low otherwise high. 
- */ - data = readl(bank->reg_base + bank->gpio_regs->ext_port); - if (data & mask) -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index 67d4a3c13ed19..929f8b75bfaee 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -@@ -2391,8 +2391,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) - } - adev->ip_blocks[i].status.sw = true; - -- /* need to do gmc hw init early so we can allocate gpu mem */ -- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { -+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { -+ /* need to do common hw init early so everything is set up for gmc */ -+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); -+ if (r) { -+ DRM_ERROR("hw_init %d failed %d\n", i, r); -+ goto init_failed; -+ } -+ adev->ip_blocks[i].status.hw = true; -+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { -+ /* need to do gmc hw init early so we can allocate gpu mem */ - /* Try to reserve bad pages early */ - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_exchange_data(adev); -@@ -3078,8 +3086,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) - int i, r; - - static enum amd_ip_block_type ip_order[] = { -- AMD_IP_BLOCK_TYPE_GMC, - AMD_IP_BLOCK_TYPE_COMMON, -+ AMD_IP_BLOCK_TYPE_GMC, - AMD_IP_BLOCK_TYPE_PSP, - AMD_IP_BLOCK_TYPE_IH, - }; -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c -index f49db13b3fbee..0debdbcf46310 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c -@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, - WREG32_PCIE(smnPCIE_LC_CNTL, data); - } - -+#ifdef CONFIG_PCIEASPM - static void nbio_v2_3_program_ltr(struct amdgpu_device *adev) - { - uint32_t def, data; -@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); - } -+#endif - - static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) - { -+#ifdef CONFIG_PCIEASPM - uint32_t def, data; - - def = data = RREG32_PCIE(smnPCIE_LC_CNTL); -@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL6, data); - -- nbio_v2_3_program_ltr(adev); -+ /* Don't bother about LTR if LTR is not enabled -+ * in the path */ -+ if (adev->pdev->ltr_path) -+ nbio_v2_3_program_ltr(adev); - - def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); - data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; -@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) - data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL3, data); -+#endif - } - - static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev) -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c -index f7f6ddebd3e49..37615a77287bc 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c -@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) - mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; - } - -+#ifdef CONFIG_PCIEASPM - static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) - { - uint32_t def, data; -@@ -303,9 +304,11 @@ static void 
nbio_v6_1_program_ltr(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); - } -+#endif - - static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) - { -+#ifdef CONFIG_PCIEASPM - uint32_t def, data; - - def = data = RREG32_PCIE(smnPCIE_LC_CNTL); -@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL6, data); - -- nbio_v6_1_program_ltr(adev); -+ /* Don't bother about LTR if LTR is not enabled -+ * in the path */ -+ if (adev->pdev->ltr_path) -+ nbio_v6_1_program_ltr(adev); - - def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); - data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; -@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) - data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL3, data); -+#endif - } - - const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c -index 11848d1e238b6..19455a7259391 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c -@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = { - }; - - -+#ifdef CONFIG_PCIEASPM - static void nbio_v7_4_program_ltr(struct amdgpu_device *adev) - { - uint32_t def, data; -@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); - } -+#endif - - static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) - { -+#ifdef CONFIG_PCIEASPM - uint32_t def, data; - - if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) -@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL6, data); - -- nbio_v7_4_program_ltr(adev); -+ /* Don't bother about LTR if LTR is not enabled -+ * in the path */ -+ if (adev->pdev->ltr_path) -+ nbio_v7_4_program_ltr(adev); - - def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); - data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; -@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) - data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; - if (def != data) - WREG32_PCIE(smnPCIE_LC_CNTL3, data); -+#endif - } - - const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { -diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c -index 65181efba50ec..56424f75dd2cc 100644 ---- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c -@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) - WREG32_SDMA(i, mmSDMA0_CNTL, temp); - - if (!amdgpu_sriov_vf(adev)) { -+ ring = &adev->sdma.instance[i].ring; -+ adev->nbio.funcs->sdma_doorbell_range(adev, i, -+ ring->use_doorbell, ring->doorbell_index, -+ adev->doorbell_index.sdma_doorbell_range); -+ - /* unhalt engine */ - temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); - temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); -diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c -index fde6154f20096..183024d7c184e 100644 ---- a/drivers/gpu/drm/amd/amdgpu/soc15.c -+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c -@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle) - return 0; - } - --static void soc15_doorbell_range_init(struct amdgpu_device *adev) --{ -- int i; 
-- struct amdgpu_ring *ring; -- -- /* sdma/ih doorbell range are programed by hypervisor */ -- if (!amdgpu_sriov_vf(adev)) { -- for (i = 0; i < adev->sdma.num_instances; i++) { -- ring = &adev->sdma.instance[i].ring; -- adev->nbio.funcs->sdma_doorbell_range(adev, i, -- ring->use_doorbell, ring->doorbell_index, -- adev->doorbell_index.sdma_doorbell_range); -- } -- -- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, -- adev->irq.ih.doorbell_index); -- } --} -- - static int soc15_common_hw_init(void *handle) - { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; -@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle) - - /* enable the doorbell aperture */ - soc15_enable_doorbell_aperture(adev, true); -- /* HW doorbell routing policy: doorbell writing not -- * in SDMA/IH/MM/ACV range will be routed to CP. So -- * we need to init SDMA/IH/MM/ACV doorbell range prior -- * to CP ip block init and ring test. -- */ -- soc15_doorbell_range_init(adev); - - return 0; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c -index 03b7066471f9a..1e83db0c5438d 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c -+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c -@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) - } - } - -+ if (!amdgpu_sriov_vf(adev)) -+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, -+ adev->irq.ih.doorbell_index); -+ - pci_set_master(adev->pdev); - - /* enable interrupts */ -diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c -index 2022ffbb8dba5..59dfca093155c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c -+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c -@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) - } - } - -+ if (!amdgpu_sriov_vf(adev)) -+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, -+ adev->irq.ih.doorbell_index); -+ - pci_set_master(adev->pdev); - - /* enable interrupts */ -diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c -index 19bf717fd4cb6..5508ebb9eb434 100644 ---- a/drivers/gpu/drm/i915/display/icl_dsi.c -+++ b/drivers/gpu/drm/i915/display/icl_dsi.c -@@ -1629,6 +1629,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, - /* FIXME: initialize from VBT */ - vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; - -+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; -+ - ret = intel_dsc_compute_params(crtc_state); - if (ret) - return ret; -diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c -index 41aaa6c98114f..fe8b6b72970a2 100644 ---- a/drivers/gpu/drm/i915/display/intel_dp.c -+++ b/drivers/gpu/drm/i915/display/intel_dp.c -@@ -1379,6 +1379,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, - * DP_DSC_RC_BUF_SIZE for this. - */ - vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; -+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; - - /* - * Slice Height of 8 works for all currently available panels. 
So start -diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c -index 43e1bbc1e3035..ca530f0733e0e 100644 ---- a/drivers/gpu/drm/i915/display/intel_vdsc.c -+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c -@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) - u8 i = 0; - - vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; -- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay; - vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, - pipe_config->dsc.slice_count); - -diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h -index 9feda105f9131..a7acffbf15d1f 100644 ---- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h -+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h -@@ -235,6 +235,14 @@ struct intel_guc { - * @shift: Right shift value for the gpm timestamp - */ - u32 shift; -+ -+ /** -+ * @last_stat_jiffies: jiffies at last actual stats collection time -+ * We use this timestamp to ensure we don't oversample the -+ * stats because runtime power management events can trigger -+ * stats collection at much higher rates than required. -+ */ -+ unsigned long last_stat_jiffies; - } timestamp; - - #ifdef CONFIG_DRM_I915_SELFTEST -diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c -index 26a051ef119df..d7e4681d7297c 100644 ---- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c -+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c -@@ -1365,6 +1365,8 @@ static void __update_guc_busyness_stats(struct intel_guc *guc) - unsigned long flags; - ktime_t unused; - -+ guc->timestamp.last_stat_jiffies = jiffies; -+ - spin_lock_irqsave(&guc->timestamp.lock, flags); - - guc_update_pm_timestamp(guc, &unused); -@@ -1436,7 +1438,23 @@ void intel_guc_busyness_park(struct intel_gt *gt) - if (!guc_submission_initialized(guc)) - return; - -- cancel_delayed_work(&guc->timestamp.work); -+ /* -+ * There is a race with suspend flow where the worker runs after suspend -+ * and causes an unclaimed register access warning. Cancel the worker -+ * synchronously here. -+ */ -+ cancel_delayed_work_sync(&guc->timestamp.work); -+ -+ /* -+ * Before parking, we should sample engine busyness stats if we need to. -+ * We can skip it if we are less than half a ping from the last time we -+ * sampled the busyness stats. 
-+ */ -+ if (guc->timestamp.last_stat_jiffies && -+ !time_after(jiffies, guc->timestamp.last_stat_jiffies + -+ (guc->timestamp.ping_delay / 2))) -+ return; -+ - __update_guc_busyness_stats(guc); - } - -diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h -index 4f5a51bb9e1e4..e77956ae88a4b 100644 ---- a/drivers/gpu/drm/i915/i915_reg.h -+++ b/drivers/gpu/drm/i915/i915_reg.h -@@ -1849,14 +1849,14 @@ - - #define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8) - #define GT0_PERF_LIMIT_REASONS_MASK 0xde3 --#define PROCHOT_MASK REG_BIT(1) --#define THERMAL_LIMIT_MASK REG_BIT(2) --#define RATL_MASK REG_BIT(6) --#define VR_THERMALERT_MASK REG_BIT(7) --#define VR_TDC_MASK REG_BIT(8) --#define POWER_LIMIT_4_MASK REG_BIT(9) --#define POWER_LIMIT_1_MASK REG_BIT(11) --#define POWER_LIMIT_2_MASK REG_BIT(12) -+#define PROCHOT_MASK REG_BIT(0) -+#define THERMAL_LIMIT_MASK REG_BIT(1) -+#define RATL_MASK REG_BIT(5) -+#define VR_THERMALERT_MASK REG_BIT(6) -+#define VR_TDC_MASK REG_BIT(7) -+#define POWER_LIMIT_4_MASK REG_BIT(8) -+#define POWER_LIMIT_1_MASK REG_BIT(10) -+#define POWER_LIMIT_2_MASK REG_BIT(11) - - #define CHV_CLK_CTL1 _MMIO(0x101100) - #define VLV_CLK_CTL2 _MMIO(0x101104) -diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c -index 16460b169ed21..2a32729a74b51 100644 ---- a/drivers/gpu/drm/i915/i915_vma.c -+++ b/drivers/gpu/drm/i915/i915_vma.c -@@ -1870,12 +1870,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma, - enum dma_resv_usage usage; - int idx; - -- obj->read_domains = 0; - if (flags & EXEC_OBJECT_WRITE) { - usage = DMA_RESV_USAGE_WRITE; - obj->write_domain = I915_GEM_DOMAIN_RENDER; -+ obj->read_domains = 0; - } else { - usage = DMA_RESV_USAGE_READ; -+ obj->write_domain = 0; - } - - dma_fence_array_for_each(curr, idx, fence) -diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c -index 8640a8a8a4691..44aa526294439 100644 ---- a/drivers/gpu/drm/meson/meson_plane.c -+++ b/drivers/gpu/drm/meson/meson_plane.c -@@ -168,7 +168,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, - - /* Enable OSD and BLK0, set max global alpha */ - priv->viu.osd1_ctrl_stat = OSD_ENABLE | -- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) | -+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) | - OSD_BLK0_ENABLE; - - priv->viu.osd1_ctrl_stat2 = readl(priv->io_base + -diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c -index bb7e109534de1..d4b907889a21d 100644 ---- a/drivers/gpu/drm/meson/meson_viu.c -+++ b/drivers/gpu/drm/meson/meson_viu.c -@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv, - priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12)); - writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff), - priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21)); -- writel((m[11] & 0x1fff) << 16, -+ writel((m[11] & 0x1fff), - priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22)); - - writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff), -diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c -index a189982601a48..e8040defe6073 100644 ---- a/drivers/gpu/drm/panel/panel-edp.c -+++ b/drivers/gpu/drm/panel/panel-edp.c -@@ -1270,7 +1270,8 @@ static const struct panel_desc innolux_n116bca_ea1 = { - }, - .delay = { - .hpd_absent = 200, -- .prepare_to_enable = 80, -+ .enable = 80, -+ .disable = 50, - .unprepare = 500, - }, - }; -diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c -index d6e831576cd2b..88271f04615b0 100644 ---- 
a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c -+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c -@@ -1436,11 +1436,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id, - die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_HDMI | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id); -+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL; -+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_EDP0: - die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX; - die |= RK3568_SYS_DSP_INFACE_EN_EDP | - FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id); -+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL; -+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags); - break; - case ROCKCHIP_VOP2_EP_MIPI0: - die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX; -diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c -index fc8c1420c0b69..64b14ac4c7b02 100644 ---- a/drivers/iommu/intel/dmar.c -+++ b/drivers/iommu/intel/dmar.c -@@ -2368,13 +2368,6 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert) - if (!dmar_in_use()) - return 0; - -- /* -- * It's unlikely that any I/O board is hot added before the IOMMU -- * subsystem is initialized. -- */ -- if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled) -- return -EOPNOTSUPP; -- - if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { - tmp = handle; - } else { -diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c -index c0464959cbcdb..861a239d905a4 100644 ---- a/drivers/iommu/intel/iommu.c -+++ b/drivers/iommu/intel/iommu.c -@@ -3133,7 +3133,13 @@ static int __init init_dmars(void) - - #ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { -+ /* -+ * Call dmar_alloc_hwirq() with dmar_global_lock held, -+ * could cause possible lock race condition. -+ */ -+ up_write(&dmar_global_lock); - ret = intel_svm_enable_prq(iommu); -+ down_write(&dmar_global_lock); - if (ret) - goto free_iommu; - } -@@ -4039,6 +4045,7 @@ int __init intel_iommu_init(void) - force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) || - platform_optin_force_iommu(); - -+ down_write(&dmar_global_lock); - if (dmar_table_init()) { - if (force_on) - panic("tboot: Failed to initialize DMAR table\n"); -@@ -4051,6 +4058,16 @@ int __init intel_iommu_init(void) - goto out_free_dmar; - } - -+ up_write(&dmar_global_lock); -+ -+ /* -+ * The bus notifier takes the dmar_global_lock, so lockdep will -+ * complain later when we register it under the lock. 
-+ */ -+ dmar_register_bus_notifier(); -+ -+ down_write(&dmar_global_lock); -+ - if (!no_iommu) - intel_iommu_debugfs_init(); - -@@ -4098,9 +4115,11 @@ int __init intel_iommu_init(void) - pr_err("Initialization failed\n"); - goto out_free_dmar; - } -+ up_write(&dmar_global_lock); - - init_iommu_pm_ops(); - -+ down_read(&dmar_global_lock); - for_each_active_iommu(iommu, drhd) { - /* - * The flush queue implementation does not perform -@@ -4118,11 +4137,13 @@ int __init intel_iommu_init(void) - "%s", iommu->name); - iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); - } -+ up_read(&dmar_global_lock); - - bus_set_iommu(&pci_bus_type, &intel_iommu_ops); - if (si_domain && !hw_pass_through) - register_memory_notifier(&intel_iommu_memory_nb); - -+ down_read(&dmar_global_lock); - if (probe_acpi_namespace_devices()) - pr_warn("ACPI name space devices didn't probe correctly\n"); - -@@ -4133,15 +4154,17 @@ int __init intel_iommu_init(void) - - iommu_disable_protect_mem_regions(iommu); - } -+ up_read(&dmar_global_lock); - -- intel_iommu_enabled = 1; -- dmar_register_bus_notifier(); - pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); - -+ intel_iommu_enabled = 1; -+ - return 0; - - out_free_dmar: - intel_iommu_free_dmars(); -+ up_write(&dmar_global_lock); - return ret; - } - -diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c -index 520ed965bb7a4..583ca847a39cb 100644 ---- a/drivers/of/fdt.c -+++ b/drivers/of/fdt.c -@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob, - for (offset = 0; - offset >= 0 && depth >= initial_depth; - offset = fdt_next_node(blob, offset, &depth)) { -- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) -+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) - continue; - - if (!IS_ENABLED(CONFIG_OF_KOBJ) && -diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c -index f69ab90b5e22d..6052f264bbb0a 100644 ---- a/drivers/parisc/ccio-dma.c -+++ b/drivers/parisc/ccio-dma.c -@@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev) - } - ccio_ioc_init(ioc); - if (ccio_init_resources(ioc)) { -+ iounmap(ioc->ioc_regs); - kfree(ioc); - return -ENOMEM; - } -diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c -index 6bec7f1431348..704a99d2f93ce 100644 ---- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c -+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c -@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187); - DECLARE_MSM_GPIO_PINS(188); - DECLARE_MSM_GPIO_PINS(189); - --static const unsigned int sdc2_clk_pins[] = { 190 }; --static const unsigned int sdc2_cmd_pins[] = { 191 }; --static const unsigned int sdc2_data_pins[] = { 192 }; --static const unsigned int ufs_reset_pins[] = { 193 }; -+static const unsigned int ufs_reset_pins[] = { 190 }; -+static const unsigned int sdc2_clk_pins[] = { 191 }; -+static const unsigned int sdc2_cmd_pins[] = { 192 }; -+static const unsigned int sdc2_data_pins[] = { 193 }; - - enum sc8180x_functions { - msm_mux_adsp_ext, -@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = { - static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = { - { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 }, - { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 }, -- { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 }, -+ { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 }, - { 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 }, - { 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 }, - { 56, 57 
}, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 }, -diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c -index 21054fcacd345..18088f6f44b23 100644 ---- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c -+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c -@@ -98,7 +98,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match); - static struct platform_driver a100_r_pinctrl_driver = { - .probe = a100_r_pinctrl_probe, - .driver = { -- .name = "sun50iw10p1-r-pinctrl", -+ .name = "sun50i-a100-r-pinctrl", - .of_match_table = a100_r_pinctrl_match, - }, - }; -diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c -index 386bb523c69ea..bdc3efdb12219 100644 ---- a/fs/cifs/connect.c -+++ b/fs/cifs/connect.c -@@ -707,9 +707,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) - int length = 0; - int total_read; - -- smb_msg->msg_control = NULL; -- smb_msg->msg_controllen = 0; -- - for (total_read = 0; msg_data_left(smb_msg); total_read += length) { - try_to_freeze(); - -@@ -765,7 +762,7 @@ int - cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, - unsigned int to_read) - { -- struct msghdr smb_msg; -+ struct msghdr smb_msg = {}; - struct kvec iov = {.iov_base = buf, .iov_len = to_read}; - iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read); - -@@ -775,15 +772,13 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, - ssize_t - cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read) - { -- struct msghdr smb_msg; -+ struct msghdr smb_msg = {}; - - /* - * iov_iter_discard already sets smb_msg.type and count and iov_offset - * and cifs_readv_from_socket sets msg_control and msg_controllen - * so little to initialize in struct msghdr - */ -- smb_msg.msg_name = NULL; -- smb_msg.msg_namelen = 0; - iov_iter_discard(&smb_msg.msg_iter, READ, to_read); - - return cifs_readv_from_socket(server, &smb_msg); -@@ -793,7 +788,7 @@ int - cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, - unsigned int page_offset, unsigned int to_read) - { -- struct msghdr smb_msg; -+ struct msghdr smb_msg = {}; - struct bio_vec bv = { - .bv_page = page, .bv_len = to_read, .bv_offset = page_offset}; - iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read); -diff --git a/fs/cifs/file.c b/fs/cifs/file.c -index 0f03c0bfdf280..02dd591acabb3 100644 ---- a/fs/cifs/file.c -+++ b/fs/cifs/file.c -@@ -3327,6 +3327,9 @@ static ssize_t __cifs_writev( - - ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) - { -+ struct file *file = iocb->ki_filp; -+ -+ cifs_revalidate_mapping(file->f_inode); - return __cifs_writev(iocb, from, true); - } - -diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c -index bfc9bd55870a0..8adc0f2a59518 100644 ---- a/fs/cifs/transport.c -+++ b/fs/cifs/transport.c -@@ -196,10 +196,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, - - *sent = 0; - -- smb_msg->msg_name = (struct sockaddr *) &server->dstaddr; -- smb_msg->msg_namelen = sizeof(struct sockaddr); -- smb_msg->msg_control = NULL; -- smb_msg->msg_controllen = 0; - if (server->noblocksnd) - smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; - else -@@ -311,7 +307,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, - sigset_t mask, oldmask; - size_t total_len = 0, sent, size; - struct socket *ssocket = server->ssocket; -- struct msghdr smb_msg; -+ struct msghdr smb_msg = {}; - __be32 rfc1002_marker; - - if (cifs_rdma_enabled(server)) { -diff --git 
a/fs/nfs/internal.h b/fs/nfs/internal.h -index 8f8cd6e2d4dbc..597e3ce3f148a 100644 ---- a/fs/nfs/internal.h -+++ b/fs/nfs/internal.h -@@ -604,6 +604,31 @@ static inline gfp_t nfs_io_gfp_mask(void) - return GFP_KERNEL; - } - -+/* -+ * Special version of should_remove_suid() that ignores capabilities. -+ */ -+static inline int nfs_should_remove_suid(const struct inode *inode) -+{ -+ umode_t mode = inode->i_mode; -+ int kill = 0; -+ -+ /* suid always must be killed */ -+ if (unlikely(mode & S_ISUID)) -+ kill = ATTR_KILL_SUID; -+ -+ /* -+ * sgid without any exec bits is just a mandatory locking mark; leave -+ * it alone. If some exec bits are set, it's a real sgid; kill it. -+ */ -+ if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) -+ kill |= ATTR_KILL_SGID; -+ -+ if (unlikely(kill && S_ISREG(mode))) -+ return kill; -+ -+ return 0; -+} -+ - /* unlink.c */ - extern struct rpc_task * - nfs_async_rename(struct inode *old_dir, struct inode *new_dir, -diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c -index 068c45b3bc1ab..6dab9e4083729 100644 ---- a/fs/nfs/nfs42proc.c -+++ b/fs/nfs/nfs42proc.c -@@ -78,10 +78,15 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep, - - status = nfs4_call_sync(server->client, server, msg, - &args.seq_args, &res.seq_res, 0); -- if (status == 0) -+ if (status == 0) { -+ if (nfs_should_remove_suid(inode)) { -+ spin_lock(&inode->i_lock); -+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE); -+ spin_unlock(&inode->i_lock); -+ } - status = nfs_post_op_update_inode_force_wcc(inode, - res.falloc_fattr); -- -+ } - if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE]) - trace_nfs4_fallocate(inode, &args, status); - else -diff --git a/fs/nfs/super.c b/fs/nfs/super.c -index 6ab5eeb000dc0..5e4bacb77bfc7 100644 ---- a/fs/nfs/super.c -+++ b/fs/nfs/super.c -@@ -1051,22 +1051,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx) - if (ctx->bsize) - sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits); - -- if (server->nfs_client->rpc_ops->version != 2) { -- /* The VFS shouldn't apply the umask to mode bits. We will do -- * so ourselves when necessary. -+ switch (server->nfs_client->rpc_ops->version) { -+ case 2: -+ sb->s_time_gran = 1000; -+ sb->s_time_min = 0; -+ sb->s_time_max = U32_MAX; -+ break; -+ case 3: -+ /* -+ * The VFS shouldn't apply the umask to mode bits. -+ * We will do so ourselves when necessary. - */ - sb->s_flags |= SB_POSIXACL; - sb->s_time_gran = 1; -- sb->s_export_op = &nfs_export_ops; -- } else -- sb->s_time_gran = 1000; -- -- if (server->nfs_client->rpc_ops->version != 4) { - sb->s_time_min = 0; - sb->s_time_max = U32_MAX; -- } else { -+ sb->s_export_op = &nfs_export_ops; -+ break; -+ case 4: -+ sb->s_flags |= SB_POSIXACL; -+ sb->s_time_gran = 1; - sb->s_time_min = S64_MIN; - sb->s_time_max = S64_MAX; -+ if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) -+ sb->s_export_op = &nfs_export_ops; -+ break; - } - - sb->s_magic = NFS_SUPER_MAGIC; -diff --git a/fs/nfs/write.c b/fs/nfs/write.c -index 5d7e1c2061842..4212473c69ee9 100644 ---- a/fs/nfs/write.c -+++ b/fs/nfs/write.c -@@ -1497,31 +1497,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata) - NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); - } - --/* -- * Special version of should_remove_suid() that ignores capabilities. 
-- */ --static int nfs_should_remove_suid(const struct inode *inode) --{ -- umode_t mode = inode->i_mode; -- int kill = 0; -- -- /* suid always must be killed */ -- if (unlikely(mode & S_ISUID)) -- kill = ATTR_KILL_SUID; -- -- /* -- * sgid without any exec bits is just a mandatory locking mark; leave -- * it alone. If some exec bits are set, it's a real sgid; kill it. -- */ -- if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) -- kill |= ATTR_KILL_SGID; -- -- if (unlikely(kill && S_ISREG(mode))) -- return kill; -- -- return 0; --} -- - static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, - struct nfs_fattr *fattr) - { -diff --git a/include/linux/dmar.h b/include/linux/dmar.h -index f3a3d95df5325..cbd714a198a0a 100644 ---- a/include/linux/dmar.h -+++ b/include/linux/dmar.h -@@ -69,7 +69,6 @@ struct dmar_pci_notify_info { - - extern struct rw_semaphore dmar_global_lock; - extern struct list_head dmar_drhd_units; --extern int intel_iommu_enabled; - - #define for_each_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ -@@ -93,8 +92,7 @@ extern int intel_iommu_enabled; - static inline bool dmar_rcu_check(void) - { - return rwsem_is_locked(&dmar_global_lock) || -- system_state == SYSTEM_BOOTING || -- (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled); -+ system_state == SYSTEM_BOOTING; - } - - #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) -diff --git a/include/linux/of_device.h b/include/linux/of_device.h -index 1d7992a02e36e..1a803e4335d30 100644 ---- a/include/linux/of_device.h -+++ b/include/linux/of_device.h -@@ -101,8 +101,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) - } - - static inline int of_dma_configure_id(struct device *dev, -- struct device_node *np, -- bool force_dma) -+ struct device_node *np, -+ bool force_dma, -+ const u32 *id) - { - return 0; - } -diff --git a/include/net/xfrm.h b/include/net/xfrm.h -index c39d910d4b454..9ca397eed1638 100644 ---- a/include/net/xfrm.h -+++ b/include/net/xfrm.h -@@ -1195,6 +1195,8 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); - - static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) - { -+ if (!sk_fullsock(osk)) -+ return 0; - sk->sk_policy[0] = NULL; - sk->sk_policy[1] = NULL; - if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) -diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c -index 48833d0edd089..602da2cfd57c8 100644 ---- a/io_uring/io_uring.c -+++ b/io_uring/io_uring.c -@@ -5061,7 +5061,8 @@ done: - req_set_fail(req); - __io_req_complete(req, issue_flags, ret, 0); - /* put file to avoid an attempt to IOPOLL the req */ -- io_put_file(req->file); -+ if (!(req->flags & REQ_F_FIXED_FILE)) -+ io_put_file(req->file); - req->file = NULL; - return 0; - } -diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c -index afc6c0e9c966e..f93983910b5e1 100644 ---- a/kernel/cgroup/cgroup-v1.c -+++ b/kernel/cgroup/cgroup-v1.c -@@ -59,6 +59,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) - int retval = 0; - - mutex_lock(&cgroup_mutex); -+ cpus_read_lock(); - percpu_down_write(&cgroup_threadgroup_rwsem); - for_each_root(root) { - struct cgroup *from_cgrp; -@@ -72,6 +73,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) - break; - } - percpu_up_write(&cgroup_threadgroup_rwsem); -+ cpus_read_unlock(); - mutex_unlock(&cgroup_mutex); - - return retval; -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c -index 
da8b3cc67234d..028eb28c7882d 100644 ---- a/net/ipv4/ip_output.c -+++ b/net/ipv4/ip_output.c -@@ -1704,7 +1704,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, - tcp_hdr(skb)->source, tcp_hdr(skb)->dest, - arg->uid); - security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); -- rt = ip_route_output_key(net, &fl4); -+ rt = ip_route_output_flow(net, &fl4, sk); - if (IS_ERR(rt)) - return; - -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index 586c102ce152d..9fd92e263d0a3 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -819,6 +819,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) - ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ? - inet_twsk(sk)->tw_priority : sk->sk_priority; - transmit_time = tcp_transmit_time(sk); -+ xfrm_sk_clone_policy(ctl_sk, sk); - } - ip_send_unicast_reply(ctl_sk, - skb, &TCP_SKB_CB(skb)->header.h4.opt, -@@ -827,6 +828,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) - transmit_time); - - ctl_sk->sk_mark = 0; -+ xfrm_sk_free_policy(ctl_sk); - sock_net_set(ctl_sk, &init_net); - __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); - __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); -diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c -index be09941fe6d9a..5eabe746cfa76 100644 ---- a/net/ipv6/tcp_ipv6.c -+++ b/net/ipv6/tcp_ipv6.c -@@ -952,7 +952,10 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 - * Underlying function will use this to retrieve the network - * namespace - */ -- dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); -+ if (sk && sk->sk_state != TCP_TIME_WAIT) -+ dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/ -+ else -+ dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL); - if (!IS_ERR(dst)) { - skb_dst_set(buff, dst); - ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, -diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c -index c1a01947530f0..db8c0de1de422 100644 ---- a/net/sunrpc/clnt.c -+++ b/net/sunrpc/clnt.c -@@ -2858,6 +2858,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, - - task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, - &rpc_cb_add_xprt_call_ops, data); -+ if (IS_ERR(task)) -+ return PTR_ERR(task); -+ - data->xps->xps_nunique_destaddr_xprts++; - rpc_put_task(task); - success: -diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c -index 53b024cea3b3e..5ecafffe7ce59 100644 ---- a/net/sunrpc/xprt.c -+++ b/net/sunrpc/xprt.c -@@ -1179,11 +1179,8 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task) - { - struct rpc_rqst *req = task->tk_rqstp; - -- if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) { -+ if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) - xprt_request_rb_remove(req->rq_xprt, req); -- xdr_free_bvec(&req->rq_rcv_buf); -- req->rq_private_buf.bvec = NULL; -- } - } - - /** -@@ -1221,6 +1218,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) - - xprt->stat.recvs++; - -+ xdr_free_bvec(&req->rq_rcv_buf); -+ req->rq_private_buf.bvec = NULL; - req->rq_private_buf.len = copied; - /* Ensure all writes are done before we update */ - /* req->rq_reply_bytes_recvd */ -@@ -1453,6 +1452,7 @@ xprt_request_dequeue_xprt(struct rpc_task *task) - xprt_request_dequeue_transmit_locked(task); - xprt_request_dequeue_receive_locked(task); - spin_unlock(&xprt->queue_lock); -+ xdr_free_bvec(&req->rq_rcv_buf); - } - } - -diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c -index 
61df4d33c48ff..7f340f18599c9 100644 ---- a/sound/pci/hda/patch_sigmatel.c -+++ b/sound/pci/hda/patch_sigmatel.c -@@ -209,6 +209,7 @@ struct sigmatel_spec { - - /* beep widgets */ - hda_nid_t anabeep_nid; -+ bool beep_power_on; - - /* SPDIF-out mux */ - const char * const *spdif_labels; -@@ -4443,6 +4444,28 @@ static int stac_suspend(struct hda_codec *codec) - - return 0; - } -+ -+static int stac_check_power_status(struct hda_codec *codec, hda_nid_t nid) -+{ -+#ifdef CONFIG_SND_HDA_INPUT_BEEP -+ struct sigmatel_spec *spec = codec->spec; -+#endif -+ int ret = snd_hda_gen_check_power_status(codec, nid); -+ -+#ifdef CONFIG_SND_HDA_INPUT_BEEP -+ if (nid == spec->gen.beep_nid && codec->beep) { -+ if (codec->beep->enabled != spec->beep_power_on) { -+ spec->beep_power_on = codec->beep->enabled; -+ if (spec->beep_power_on) -+ snd_hda_power_up_pm(codec); -+ else -+ snd_hda_power_down_pm(codec); -+ } -+ ret |= spec->beep_power_on; -+ } -+#endif -+ return ret; -+} - #else - #define stac_suspend NULL - #endif /* CONFIG_PM */ -@@ -4455,6 +4478,7 @@ static const struct hda_codec_ops stac_patch_ops = { - .unsol_event = snd_hda_jack_unsol_event, - #ifdef CONFIG_PM - .suspend = stac_suspend, -+ .check_power_status = stac_check_power_status, - #endif - }; - -diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h -index d30439b4b8ab4..869379f91fe48 100644 ---- a/tools/include/uapi/asm/errno.h -+++ b/tools/include/uapi/asm/errno.h -@@ -9,8 +9,8 @@ - #include "../../../arch/alpha/include/uapi/asm/errno.h" - #elif defined(__mips__) - #include "../../../arch/mips/include/uapi/asm/errno.h" --#elif defined(__xtensa__) --#include "../../../arch/xtensa/include/uapi/asm/errno.h" -+#elif defined(__hppa__) -+#include "../../../arch/parisc/include/uapi/asm/errno.h" - #else - #include - #endif diff --git a/sys-kernel/pinephone-sources/files/5.19.11-12.patch b/sys-kernel/pinephone-sources/files/5.19.11-12.patch deleted file mode 100644 index 8c6e32f..0000000 --- a/sys-kernel/pinephone-sources/files/5.19.11-12.patch +++ /dev/null @@ -1,9776 +0,0 @@ -diff --git a/Makefile b/Makefile -index 01463a22926d5..7df4c195c8ab2 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 19 --SUBLEVEL = 11 -+SUBLEVEL = 12 - EXTRAVERSION = - NAME = Superb Owl - -diff --git a/arch/arm/boot/dts/lan966x.dtsi b/arch/arm/boot/dts/lan966x.dtsi -index 38e90a31d2dd1..25c19f9d0a12f 100644 ---- a/arch/arm/boot/dts/lan966x.dtsi -+++ b/arch/arm/boot/dts/lan966x.dtsi -@@ -515,13 +515,13 @@ - - phy0: ethernet-phy@1 { - reg = <1>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - }; - - phy1: ethernet-phy@2 { - reg = <2>; -- interrupts = ; -+ interrupts = ; - status = "disabled"; - }; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts -index 92eaf4ef45638..57ecdfa0dfc09 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts -@@ -152,11 +152,11 @@ - * CPLD_reset is RESET_SOFT in schematic - */ - gpio-line-names = -- "CPLD_D[1]", "CPLD_int", "CPLD_reset", "", -- "", "CPLD_D[0]", "", "", -- "", "", "", "CPLD_D[2]", -- "CPLD_D[3]", "CPLD_D[4]", "CPLD_D[5]", "CPLD_D[6]", -- "CPLD_D[7]", "", "", "", -+ "CPLD_D[6]", "CPLD_int", "CPLD_reset", "", -+ "", "CPLD_D[7]", "", "", -+ "", "", "", "CPLD_D[5]", -+ "CPLD_D[4]", "CPLD_D[3]", "CPLD_D[2]", "CPLD_D[1]", -+ "CPLD_D[0]", "", "", "", - "", "", "", "", - "", "", "", "KBD_intK", - "", 
"", "", ""; -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts -index 286d2df01cfa7..7e0aeb2db3054 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml-mba8mx.dts -@@ -5,7 +5,6 @@ - - /dts-v1/; - --#include - #include "imx8mm-tqma8mqml.dtsi" - #include "mba8mx.dtsi" - -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi -index 16ee9b5179e6e..f649dfacb4b69 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi -@@ -3,6 +3,7 @@ - * Copyright 2020-2021 TQ-Systems GmbH - */ - -+#include - #include "imx8mm.dtsi" - - / { -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi -index c2d4da25482ff..44b473494d0f5 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi -@@ -359,8 +359,8 @@ - nxp,dvs-standby-voltage = <850000>; - regulator-always-on; - regulator-boot-on; -- regulator-max-microvolt = <950000>; -- regulator-min-microvolt = <850000>; -+ regulator-max-microvolt = <1050000>; -+ regulator-min-microvolt = <805000>; - regulator-name = "On-module +VDD_ARM (BUCK2)"; - regulator-ramp-delay = <3125>; - }; -@@ -368,8 +368,8 @@ - reg_vdd_dram: BUCK3 { - regulator-always-on; - regulator-boot-on; -- regulator-max-microvolt = <950000>; -- regulator-min-microvolt = <850000>; -+ regulator-max-microvolt = <1000000>; -+ regulator-min-microvolt = <805000>; - regulator-name = "On-module +VDD_GPU_VPU_DDR (BUCK3)"; - }; - -@@ -408,7 +408,7 @@ - reg_vdd_snvs: LDO2 { - regulator-always-on; - regulator-boot-on; -- regulator-max-microvolt = <900000>; -+ regulator-max-microvolt = <800000>; - regulator-min-microvolt = <800000>; - regulator-name = "On-module +V0.8_SNVS (LDO2)"; - }; -diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi -index e41e1d56f980d..7bd4eecd592ef 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi -@@ -672,7 +672,6 @@ - <&clk IMX8MN_CLK_GPU_SHADER>, - <&clk IMX8MN_CLK_GPU_BUS_ROOT>, - <&clk IMX8MN_CLK_GPU_AHB>; -- resets = <&src IMX8MQ_RESET_GPU_RESET>; - }; - - pgc_dispmix: power-domain@3 { -diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts -index 6630ec561dc25..211e6a1b296e1 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts -@@ -123,8 +123,7 @@ - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_reg_can>; - regulator-name = "can2_stby"; -- gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>; -- enable-active-high; -+ gpio = <&gpio3 19 GPIO_ACTIVE_LOW>; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - }; -@@ -484,35 +483,40 @@ - lan1: port@0 { - reg = <0>; - label = "lan1"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - lan2: port@1 { - reg = <1>; - label = "lan2"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - lan3: port@2 { - reg = <2>; - label = "lan3"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - lan4: port@3 { - reg = <3>; - label = "lan4"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - 
lan5: port@4 { - reg = <4>; - label = "lan5"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - -- port@6 { -- reg = <6>; -+ port@5 { -+ reg = <5>; - label = "cpu"; - ethernet = <&fec>; - phy-mode = "rgmii-id"; -diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi -index 09f7364dd1d05..1cd389b1b95d6 100644 ---- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi -@@ -172,6 +172,7 @@ - compatible = "fsl,imx8ulp-pcc3"; - reg = <0x292d0000 0x10000>; - #clock-cells = <1>; -+ #reset-cells = <1>; - }; - - tpm5: tpm@29340000 { -@@ -270,6 +271,7 @@ - compatible = "fsl,imx8ulp-pcc4"; - reg = <0x29800000 0x10000>; - #clock-cells = <1>; -+ #reset-cells = <1>; - }; - - lpi2c6: i2c@29840000 { -@@ -414,6 +416,7 @@ - compatible = "fsl,imx8ulp-pcc5"; - reg = <0x2da70000 0x10000>; - #clock-cells = <1>; -+ #reset-cells = <1>; - }; - }; - -diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi -index 7249871530ab9..5eecbefa8a336 100644 ---- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi -+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi -@@ -2,8 +2,8 @@ - /* - * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd - * Copyright (c) 2020 Engicam srl -- * Copyright (c) 2020 Amarula Solutons -- * Copyright (c) 2020 Amarula Solutons(India) -+ * Copyright (c) 2020 Amarula Solutions -+ * Copyright (c) 2020 Amarula Solutions(India) - */ - - #include -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts -index 31ebb4e5fd330..0f9cc042d9bf0 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts -@@ -88,3 +88,8 @@ - }; - }; - }; -+ -+&wlan_host_wake_l { -+ /* Kevin has an external pull up, but Bob does not. */ -+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>; -+}; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi -index 50d459ee4831c..af5810e5f5b79 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi -@@ -244,6 +244,14 @@ - &edp { - status = "okay"; - -+ /* -+ * eDP PHY/clk don't sync reliably at anything other than 24 MHz. Only -+ * set this here, because rk3399-gru.dtsi ensures we can generate this -+ * off GPLL=600MHz, whereas some other RK3399 boards may not. 
-+ */ -+ assigned-clocks = <&cru PCLK_EDP>; -+ assigned-clock-rates = <24000000>; -+ - ports { - edp_out: port@1 { - reg = <1>; -@@ -578,6 +586,7 @@ ap_i2c_tp: &i2c5 { - }; - - wlan_host_wake_l: wlan-host-wake-l { -+ /* Kevin has an external pull up, but Bob does not */ - rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>; - }; - }; -diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi -index b1ac3a89f259c..aa3e21bd6c8f4 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi -+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi -@@ -62,7 +62,6 @@ - vcc5v0_host: vcc5v0-host-regulator { - compatible = "regulator-fixed"; - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; -- enable-active-low; - pinctrl-names = "default"; - pinctrl-0 = <&vcc5v0_host_en>; - regulator-name = "vcc5v0_host"; -diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts -index fa953b7366421..fdbfdf3634e43 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts -@@ -163,7 +163,6 @@ - - vcc3v3_sd: vcc3v3_sd { - compatible = "regulator-fixed"; -- enable-active-low; - gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>; - pinctrl-names = "default"; - pinctrl-0 = <&vcc_sd_h>; -diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts -index 02d5f5a8ca036..528bb4e8ac776 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts -@@ -506,7 +506,7 @@ - disable-wp; - pinctrl-names = "default"; - pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>; -- sd-uhs-sdr104; -+ sd-uhs-sdr50; - vmmc-supply = <&vcc3v3_sd>; - vqmmc-supply = <&vccio_sd>; - status = "okay"; -diff --git a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts -index 622be8be9813d..282f5c74d5cda 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts -@@ -618,7 +618,7 @@ - }; - - &usb2phy0_otg { -- vbus-supply = <&vcc5v0_usb_otg>; -+ phy-supply = <&vcc5v0_usb_otg>; - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts -index 0813c0c5abded..26912f02684ce 100644 ---- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts -+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts -@@ -543,7 +543,7 @@ - }; - - &usb2phy0_otg { -- vbus-supply = <&vcc5v0_usb_otg>; -+ phy-supply = <&vcc5v0_usb_otg>; - status = "okay"; - }; - -diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c -index 707b5451929d4..d4abb948eb14e 100644 ---- a/arch/arm64/kernel/topology.c -+++ b/arch/arm64/kernel/topology.c -@@ -251,7 +251,7 @@ static void amu_fie_setup(const struct cpumask *cpus) - for_each_cpu(cpu, cpus) { - if (!freq_counters_valid(cpu) || - freq_inv_set_max_ratio(cpu, -- cpufreq_get_hw_max_freq(cpu) * 1000, -+ cpufreq_get_hw_max_freq(cpu) * 1000ULL, - arch_timer_get_rate())) - return; - } -diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c -index 7a623684d9b5e..2d5a0bcb0cec1 100644 ---- a/arch/mips/lantiq/clk.c -+++ b/arch/mips/lantiq/clk.c -@@ -50,6 +50,7 @@ struct clk *clk_get_io(void) - { - return &cpu_clk_generic[2]; - } -+EXPORT_SYMBOL_GPL(clk_get_io); - - struct clk *clk_get_ppe(void) - { -diff --git a/arch/mips/loongson32/common/platform.c 
b/arch/mips/loongson32/common/platform.c -index 794c96c2a4cdd..311dc1580bbde 100644 ---- a/arch/mips/loongson32/common/platform.c -+++ b/arch/mips/loongson32/common/platform.c -@@ -98,7 +98,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - if (plat_dat->bus_id) { - __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 | - GMAC1_USE_UART0, LS1X_MUX_CTRL0); -- switch (plat_dat->interface) { -+ switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23); - break; -@@ -107,12 +107,12 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - break; - default: - pr_err("unsupported mii mode %d\n", -- plat_dat->interface); -+ plat_dat->phy_interface); - return -ENOTSUPP; - } - val &= ~GMAC1_SHUT; - } else { -- switch (plat_dat->interface) { -+ switch (plat_dat->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01); - break; -@@ -121,7 +121,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - break; - default: - pr_err("unsupported mii mode %d\n", -- plat_dat->interface); -+ plat_dat->phy_interface); - return -ENOTSUPP; - } - val &= ~GMAC0_SHUT; -@@ -131,7 +131,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) - plat_dat = dev_get_platdata(&pdev->dev); - - val &= ~PHY_INTF_SELI; -- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) -+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) - val |= 0x4 << PHY_INTF_SELI_SHIFT; - __raw_writel(val, LS1X_MUX_CTRL1); - -@@ -146,9 +146,9 @@ static struct plat_stmmacenet_data ls1x_eth0_pdata = { - .bus_id = 0, - .phy_addr = -1, - #if defined(CONFIG_LOONGSON1_LS1B) -- .interface = PHY_INTERFACE_MODE_MII, -+ .phy_interface = PHY_INTERFACE_MODE_MII, - #elif defined(CONFIG_LOONGSON1_LS1C) -- .interface = PHY_INTERFACE_MODE_RMII, -+ .phy_interface = PHY_INTERFACE_MODE_RMII, - #endif - .mdio_bus_data = &ls1x_mdio_bus_data, - .dma_cfg = &ls1x_eth_dma_cfg, -@@ -186,7 +186,7 @@ struct platform_device ls1x_eth0_pdev = { - static struct plat_stmmacenet_data ls1x_eth1_pdata = { - .bus_id = 1, - .phy_addr = -1, -- .interface = PHY_INTERFACE_MODE_MII, -+ .phy_interface = PHY_INTERFACE_MODE_MII, - .mdio_bus_data = &ls1x_mdio_bus_data, - .dma_cfg = &ls1x_eth_dma_cfg, - .has_gmac = 1, -diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig -index fcbb81feb7ad8..1f02f15569749 100644 ---- a/arch/riscv/Kconfig -+++ b/arch/riscv/Kconfig -@@ -361,6 +361,7 @@ config RISCV_ISA_C - config RISCV_ISA_SVPBMT - bool "SVPBMT extension support" - depends on 64BIT && MMU -+ depends on !XIP_KERNEL - select RISCV_ALTERNATIVE - default y - help -diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c -index 5a2de6b6f8822..5c591123c4409 100644 ---- a/arch/riscv/kernel/signal.c -+++ b/arch/riscv/kernel/signal.c -@@ -124,6 +124,8 @@ SYSCALL_DEFINE0(rt_sigreturn) - if (restore_altstack(&frame->uc.uc_stack)) - goto badframe; - -+ regs->cause = -1UL; -+ - return regs->a0; - - badframe: -diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c -index e0de60e503b98..d9e023c78f568 100644 ---- a/arch/um/kernel/um_arch.c -+++ b/arch/um/kernel/um_arch.c -@@ -33,7 +33,7 @@ - #include "um_arch.h" - - #define DEFAULT_COMMAND_LINE_ROOT "root=98:0" --#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty" -+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty0" - - /* Changed in add_arg and setup_arch, which run before SMP is started */ - static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 }; -diff --git 
a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h -index 4c0e812f2f044..19c04412f6e16 100644 ---- a/arch/x86/include/asm/kvm_host.h -+++ b/arch/x86/include/asm/kvm_host.h -@@ -713,6 +713,7 @@ struct kvm_vcpu_arch { - struct fpu_guest guest_fpu; - - u64 xcr0; -+ u64 guest_supported_xcr0; - - struct kvm_pio_request pio; - void *pio_data; -diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c -index de6d44e07e348..3ab498165639f 100644 ---- a/arch/x86/kvm/cpuid.c -+++ b/arch/x86/kvm/cpuid.c -@@ -283,7 +283,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) - { - struct kvm_lapic *apic = vcpu->arch.apic; - struct kvm_cpuid_entry2 *best; -- u64 guest_supported_xcr0; - - best = kvm_find_cpuid_entry(vcpu, 1, 0); - if (best && apic) { -@@ -295,10 +294,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) - kvm_apic_set_version(vcpu); - } - -- guest_supported_xcr0 = -+ vcpu->arch.guest_supported_xcr0 = - cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent); - -- vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0; -+ /* -+ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if -+ * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't -+ * supported by the host. -+ */ -+ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 | -+ XFEATURE_MASK_FPSSE; - - kvm_update_pv_runtime(vcpu); - -diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c -index 09fa8a94807bf..0c4a866813b31 100644 ---- a/arch/x86/kvm/emulate.c -+++ b/arch/x86/kvm/emulate.c -@@ -4134,6 +4134,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt) - { - u32 eax, ecx, edx; - -+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE)) -+ return emulate_ud(ctxt); -+ - eax = reg_read(ctxt, VCPU_REGS_RAX); - edx = reg_read(ctxt, VCPU_REGS_RDX); - ecx = reg_read(ctxt, VCPU_REGS_RCX); -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 5b36866528568..8c2815151864b 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -1025,15 +1025,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) - } - EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state); - --static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu) --{ -- return vcpu->arch.guest_fpu.fpstate->user_xfeatures; --} -- - #ifdef CONFIG_X86_64 - static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu) - { -- return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC; -+ return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; - } - #endif - -@@ -1056,7 +1051,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) - * saving. However, xcr0 bit 0 is always set, even if the - * emulated CPU does not support XSAVE (see kvm_vcpu_reset()). - */ -- valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP; -+ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; - if (xcr0 & ~valid_bits) - return 1; - -@@ -1084,6 +1079,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) - - int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) - { -+ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. 
*/ - if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || - __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { - kvm_inject_gp(vcpu, 0); -diff --git a/block/blk-core.c b/block/blk-core.c -index cc6fbcb6d2521..7743c68177e89 100644 ---- a/block/blk-core.c -+++ b/block/blk-core.c -@@ -284,49 +284,6 @@ void blk_queue_start_drain(struct request_queue *q) - wake_up_all(&q->mq_freeze_wq); - } - --/** -- * blk_cleanup_queue - shutdown a request queue -- * @q: request queue to shutdown -- * -- * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and -- * put it. All future requests will be failed immediately with -ENODEV. -- * -- * Context: can sleep -- */ --void blk_cleanup_queue(struct request_queue *q) --{ -- /* cannot be called from atomic context */ -- might_sleep(); -- -- WARN_ON_ONCE(blk_queue_registered(q)); -- -- /* mark @q DYING, no new request or merges will be allowed afterwards */ -- blk_queue_flag_set(QUEUE_FLAG_DYING, q); -- blk_queue_start_drain(q); -- -- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); -- blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); -- -- /* -- * Drain all requests queued before DYING marking. Set DEAD flag to -- * prevent that blk_mq_run_hw_queues() accesses the hardware queues -- * after draining finished. -- */ -- blk_freeze_queue(q); -- -- blk_queue_flag_set(QUEUE_FLAG_DEAD, q); -- -- blk_sync_queue(q); -- if (queue_is_mq(q)) { -- blk_mq_cancel_work_sync(q); -- blk_mq_exit_queue(q); -- } -- -- /* @q is and will stay empty, shutdown and put */ -- blk_put_queue(q); --} --EXPORT_SYMBOL(blk_cleanup_queue); -- - /** - * blk_queue_enter() - try to increase q->q_usage_counter - * @q: request queue pointer -diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c -index 61f179e5f151a..28adb01f64419 100644 ---- a/block/blk-mq-debugfs.c -+++ b/block/blk-mq-debugfs.c -@@ -116,7 +116,6 @@ static const char *const blk_queue_flag_name[] = { - QUEUE_FLAG_NAME(NOXMERGES), - QUEUE_FLAG_NAME(ADD_RANDOM), - QUEUE_FLAG_NAME(SAME_FORCE), -- QUEUE_FLAG_NAME(DEAD), - QUEUE_FLAG_NAME(INIT_DONE), - QUEUE_FLAG_NAME(STABLE_WRITES), - QUEUE_FLAG_NAME(POLL), -@@ -151,11 +150,10 @@ static ssize_t queue_state_write(void *data, const char __user *buf, - char opbuf[16] = { }, *op; - - /* -- * The "state" attribute is removed after blk_cleanup_queue() has called -- * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid -- * triggering a use-after-free. -+ * The "state" attribute is removed when the queue is removed. Don't -+ * allow setting the state on a dying queue to avoid a use-after-free. - */ -- if (blk_queue_dead(q)) -+ if (blk_queue_dying(q)) - return -ENOENT; - - if (count >= sizeof(opbuf)) { -diff --git a/block/blk-mq.c b/block/blk-mq.c -index 0a299941c622e..69d0a58f9e2f1 100644 ---- a/block/blk-mq.c -+++ b/block/blk-mq.c -@@ -3896,7 +3896,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, - q->queuedata = queuedata; - ret = blk_mq_init_allocated_queue(set, q); - if (ret) { -- blk_cleanup_queue(q); -+ blk_put_queue(q); - return ERR_PTR(ret); - } - return q; -@@ -3908,6 +3908,35 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) - } - EXPORT_SYMBOL(blk_mq_init_queue); - -+/** -+ * blk_mq_destroy_queue - shutdown a request queue -+ * @q: request queue to shutdown -+ * -+ * This shuts down a request queue allocated by blk_mq_init_queue() and drops -+ * the initial reference. All future requests will failed with -ENODEV. 
-+ * -+ * Context: can sleep -+ */ -+void blk_mq_destroy_queue(struct request_queue *q) -+{ -+ WARN_ON_ONCE(!queue_is_mq(q)); -+ WARN_ON_ONCE(blk_queue_registered(q)); -+ -+ might_sleep(); -+ -+ blk_queue_flag_set(QUEUE_FLAG_DYING, q); -+ blk_queue_start_drain(q); -+ blk_freeze_queue(q); -+ -+ blk_sync_queue(q); -+ blk_mq_cancel_work_sync(q); -+ blk_mq_exit_queue(q); -+ -+ /* @q is and will stay empty, shutdown and put */ -+ blk_put_queue(q); -+} -+EXPORT_SYMBOL(blk_mq_destroy_queue); -+ - struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, - struct lock_class_key *lkclass) - { -@@ -3920,13 +3949,23 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, - - disk = __alloc_disk_node(q, set->numa_node, lkclass); - if (!disk) { -- blk_cleanup_queue(q); -+ blk_mq_destroy_queue(q); - return ERR_PTR(-ENOMEM); - } -+ set_bit(GD_OWNS_QUEUE, &disk->state); - return disk; - } - EXPORT_SYMBOL(__blk_mq_alloc_disk); - -+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, -+ struct lock_class_key *lkclass) -+{ -+ if (!blk_get_queue(q)) -+ return NULL; -+ return __alloc_disk_node(q, NUMA_NO_NODE, lkclass); -+} -+EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); -+ - static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( - struct blk_mq_tag_set *set, struct request_queue *q, - int hctx_idx, int node) -diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c -index 9b905e9443e49..84d7f87015673 100644 ---- a/block/blk-sysfs.c -+++ b/block/blk-sysfs.c -@@ -748,11 +748,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) - * decremented with blk_put_queue(). Once the refcount reaches 0 this function - * is called. - * -- * For drivers that have a request_queue on a gendisk and added with -- * __device_add_disk() the refcount to request_queue will reach 0 with -- * the last put_disk() called by the driver. For drivers which don't use -- * __device_add_disk() this happens with blk_cleanup_queue(). -- * - * Drivers exist which depend on the release of the request_queue to be - * synchronous, it should not be deferred. - * -diff --git a/block/blk.h b/block/blk.h -index 434017701403f..0d6668663ab5d 100644 ---- a/block/blk.h -+++ b/block/blk.h -@@ -411,6 +411,9 @@ int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start, - sector_t length); - void blk_drop_partitions(struct gendisk *disk); - -+struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, -+ struct lock_class_key *lkclass); -+ - int bio_add_hw_page(struct request_queue *q, struct bio *bio, - struct page *page, unsigned int len, unsigned int offset, - unsigned int max_sectors, bool *same_page); -diff --git a/block/bsg-lib.c b/block/bsg-lib.c -index acfe1357bf6c4..fd4cd5e682826 100644 ---- a/block/bsg-lib.c -+++ b/block/bsg-lib.c -@@ -324,7 +324,7 @@ void bsg_remove_queue(struct request_queue *q) - container_of(q->tag_set, struct bsg_set, tag_set); - - bsg_unregister_queue(bset->bd); -- blk_cleanup_queue(q); -+ blk_mq_destroy_queue(q); - blk_mq_free_tag_set(&bset->tag_set); - kfree(bset); - } -@@ -399,7 +399,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, - - return q; - out_cleanup_queue: -- blk_cleanup_queue(q); -+ blk_mq_destroy_queue(q); - out_queue: - blk_mq_free_tag_set(set); - out_tag_set: -diff --git a/block/genhd.c b/block/genhd.c -index 278227ba1d531..a39c416d658fd 100644 ---- a/block/genhd.c -+++ b/block/genhd.c -@@ -617,13 +617,14 @@ void del_gendisk(struct gendisk *disk) - * Fail any new I/O. 
- */ - set_bit(GD_DEAD, &disk->state); -+ if (test_bit(GD_OWNS_QUEUE, &disk->state)) -+ blk_queue_flag_set(QUEUE_FLAG_DYING, q); - set_capacity(disk, 0); - - /* - * Prevent new I/O from crossing bio_queue_enter(). - */ - blk_queue_start_drain(q); -- blk_mq_freeze_queue_wait(q); - - if (!(disk->flags & GENHD_FL_HIDDEN)) { - sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); -@@ -647,6 +648,8 @@ void del_gendisk(struct gendisk *disk) - pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); - device_del(disk_to_dev(disk)); - -+ blk_mq_freeze_queue_wait(q); -+ - blk_throtl_cancel_bios(disk->queue); - - blk_sync_queue(q); -@@ -663,11 +666,16 @@ void del_gendisk(struct gendisk *disk) - blk_mq_unquiesce_queue(q); - - /* -- * Allow using passthrough request again after the queue is torn down. -+ * If the disk does not own the queue, allow using passthrough requests -+ * again. Else leave the queue frozen to fail all I/O. - */ -- blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q); -- __blk_mq_unfreeze_queue(q, true); -- -+ if (!test_bit(GD_OWNS_QUEUE, &disk->state)) { -+ blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q); -+ __blk_mq_unfreeze_queue(q, true); -+ } else { -+ if (queue_is_mq(q)) -+ blk_mq_exit_queue(q); -+ } - } - EXPORT_SYMBOL(del_gendisk); - -@@ -1151,6 +1159,18 @@ static void disk_release(struct device *dev) - might_sleep(); - WARN_ON_ONCE(disk_live(disk)); - -+ /* -+ * To undo the all initialization from blk_mq_init_allocated_queue in -+ * case of a probe failure where add_disk is never called we have to -+ * call blk_mq_exit_queue here. We can't do this for the more common -+ * teardown case (yet) as the tagset can be gone by the time the disk -+ * is released once it was added. -+ */ -+ if (queue_is_mq(disk->queue) && -+ test_bit(GD_OWNS_QUEUE, &disk->state) && -+ !test_bit(GD_ADDED, &disk->state)) -+ blk_mq_exit_queue(disk->queue); -+ - blkcg_exit_queue(disk->queue); - - disk_release_events(disk); -@@ -1338,12 +1358,9 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, - { - struct gendisk *disk; - -- if (!blk_get_queue(q)) -- return NULL; -- - disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); - if (!disk) -- goto out_put_queue; -+ return NULL; - - disk->bdi = bdi_alloc(node_id); - if (!disk->bdi) -@@ -1387,11 +1404,8 @@ out_free_bdi: - bdi_put(disk->bdi); - out_free_disk: - kfree(disk); --out_put_queue: -- blk_put_queue(q); - return NULL; - } --EXPORT_SYMBOL(__alloc_disk_node); - - struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass) - { -@@ -1404,9 +1418,10 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass) - - disk = __alloc_disk_node(q, node, lkclass); - if (!disk) { -- blk_cleanup_queue(q); -+ blk_put_queue(q); - return NULL; - } -+ set_bit(GD_OWNS_QUEUE, &disk->state); - return disk; - } - EXPORT_SYMBOL(__blk_alloc_disk); -@@ -1418,6 +1433,9 @@ EXPORT_SYMBOL(__blk_alloc_disk); - * This decrements the refcount for the struct gendisk. When this reaches 0 - * we'll have disk_release() called. - * -+ * Note: for blk-mq disk put_disk must be called before freeing the tag_set -+ * when handling probe errors (that is before add_disk() is called). -+ * - * Context: Any context, but the last reference must not be dropped from - * atomic context. 
- */ -@@ -1439,7 +1457,6 @@ EXPORT_SYMBOL(put_disk); - */ - void blk_cleanup_disk(struct gendisk *disk) - { -- blk_cleanup_queue(disk->queue); - put_disk(disk); - } - EXPORT_SYMBOL(blk_cleanup_disk); -diff --git a/certs/Kconfig b/certs/Kconfig -index bf9b511573d75..1f109b0708778 100644 ---- a/certs/Kconfig -+++ b/certs/Kconfig -@@ -43,7 +43,7 @@ config SYSTEM_TRUSTED_KEYRING - bool "Provide system-wide ring of trusted keys" - depends on KEYS - depends on ASYMMETRIC_KEY_TYPE -- depends on X509_CERTIFICATE_PARSER -+ depends on X509_CERTIFICATE_PARSER = y - help - Provide a system keyring to which trusted keys can be added. Keys in - the keyring are considered to be trusted. Keys may be added at will -diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c -index e232cc4fd444b..c6e41ee18aaa2 100644 ---- a/drivers/block/ataflop.c -+++ b/drivers/block/ataflop.c -@@ -2045,7 +2045,6 @@ static void atari_floppy_cleanup(void) - if (!unit[i].disk[type]) - continue; - del_gendisk(unit[i].disk[type]); -- blk_cleanup_queue(unit[i].disk[type]->queue); - put_disk(unit[i].disk[type]); - } - blk_mq_free_tag_set(&unit[i].tag_set); -diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index a59910ef948e9..1c036ef686fbb 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -2062,7 +2062,6 @@ static void loop_remove(struct loop_device *lo) - { - /* Make this loop device unreachable from pathname. */ - del_gendisk(lo->lo_disk); -- blk_cleanup_queue(lo->lo_disk->queue); - blk_mq_free_tag_set(&lo->tag_set); - - mutex_lock(&loop_ctl_mutex); -diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c -index 6699e4b2f7f43..06994a35acc7a 100644 ---- a/drivers/block/mtip32xx/mtip32xx.c -+++ b/drivers/block/mtip32xx/mtip32xx.c -@@ -3677,7 +3677,6 @@ static int mtip_block_shutdown(struct driver_data *dd) - if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) - del_gendisk(dd->disk); - -- blk_cleanup_queue(dd->queue); - blk_mq_free_tag_set(&dd->tags); - put_disk(dd->disk); - return 0; -@@ -4040,7 +4039,6 @@ static void mtip_pci_remove(struct pci_dev *pdev) - dev_info(&dd->pdev->dev, "device %s surprise removal\n", - dd->disk->disk_name); - -- blk_cleanup_queue(dd->queue); - blk_mq_free_tag_set(&dd->tags); - - /* De-initialize the protocol layer. */ -diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c -index 409c76b81aed4..a4470374f54fc 100644 ---- a/drivers/block/rnbd/rnbd-clt.c -+++ b/drivers/block/rnbd/rnbd-clt.c -@@ -1755,7 +1755,7 @@ static void rnbd_destroy_sessions(void) - list_for_each_entry_safe(dev, tn, &sess->devs_list, list) { - /* - * Here unmap happens in parallel for only one reason: -- * blk_cleanup_queue() takes around half a second, so -+ * del_gendisk() takes around half a second, so - * on huge amount of devices the whole module unload - * procedure takes minutes. 
- */ -diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c -index 63b4f6431d2e6..75057dbbcfbea 100644 ---- a/drivers/block/sx8.c -+++ b/drivers/block/sx8.c -@@ -1536,7 +1536,7 @@ err_out_free_majors: - clear_bit(0, &carm_major_alloc); - else if (host->major == 161) - clear_bit(1, &carm_major_alloc); -- blk_cleanup_queue(host->oob_q); -+ blk_mq_destroy_queue(host->oob_q); - blk_mq_free_tag_set(&host->tag_set); - err_out_dma_free: - dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma); -@@ -1570,7 +1570,7 @@ static void carm_remove_one (struct pci_dev *pdev) - clear_bit(0, &carm_major_alloc); - else if (host->major == 161) - clear_bit(1, &carm_major_alloc); -- blk_cleanup_queue(host->oob_q); -+ blk_mq_destroy_queue(host->oob_q); - blk_mq_free_tag_set(&host->tag_set); - dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma); - iounmap(host->mmio); -diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c -index d756423e0059a..59d6d5faf7396 100644 ---- a/drivers/block/virtio_blk.c -+++ b/drivers/block/virtio_blk.c -@@ -1107,7 +1107,6 @@ static void virtblk_remove(struct virtio_device *vdev) - flush_work(&vblk->config_work); - - del_gendisk(vblk->disk); -- blk_cleanup_queue(vblk->disk->queue); - blk_mq_free_tag_set(&vblk->tag_set); - - mutex_lock(&vblk->vdev_mutex); -diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c -index 7a6ed83481b8d..18ad43d9933ec 100644 ---- a/drivers/block/z2ram.c -+++ b/drivers/block/z2ram.c -@@ -384,7 +384,6 @@ static void __exit z2_exit(void) - - for (i = 0; i < Z2MINOR_COUNT; i++) { - del_gendisk(z2ram_gendisk[i]); -- blk_cleanup_queue(z2ram_gendisk[i]->queue); - put_disk(z2ram_gendisk[i]); - } - blk_mq_free_tag_set(&tag_set); -diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c -index 8e78b37d0f6a4..f4cc90ea6198e 100644 ---- a/drivers/cdrom/gdrom.c -+++ b/drivers/cdrom/gdrom.c -@@ -831,7 +831,6 @@ probe_fail_no_mem: - - static int remove_gdrom(struct platform_device *devptr) - { -- blk_cleanup_queue(gd.gdrom_rq); - blk_mq_free_tag_set(&gd.tag_set); - free_irq(HW_EVENT_GDROM_CMD, &gd); - free_irq(HW_EVENT_GDROM_DMA, &gd); -diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c -index cb6401c9e9a4f..acf31cc1dbcca 100644 ---- a/drivers/dax/hmem/device.c -+++ b/drivers/dax/hmem/device.c -@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r) - .start = r->start, - .end = r->end, - .flags = IORESOURCE_MEM, -+ .desc = IORES_DESC_SOFT_RESERVED, - }; - struct platform_device *pdev; - struct memregion_info info; -diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c -index d4f1e4e9603a4..85e00701473cb 100644 ---- a/drivers/dma/ti/k3-udma-private.c -+++ b/drivers/dma/ti/k3-udma-private.c -@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) - } - - pdev = of_find_device_by_node(udma_node); -+ if (np != udma_node) -+ of_node_put(udma_node); -+ - if (!pdev) { - pr_debug("UDMA device not found\n"); - return ERR_PTR(-EPROBE_DEFER); - } - -- if (np != udma_node) -- of_node_put(udma_node); -- - ud = platform_get_drvdata(pdev); - if (!ud) { - pr_debug("UDMA has not been probed\n"); -diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c -index 673f3eb498f43..e9afa8cab7309 100644 ---- a/drivers/firmware/arm_scmi/reset.c -+++ b/drivers/firmware/arm_scmi/reset.c -@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, - struct scmi_xfer *t; - 
struct scmi_msg_reset_domain_reset *dom; - struct scmi_reset_info *pi = ph->get_priv(ph); -- struct reset_dom_info *rdom = pi->dom_info + domain; -+ struct reset_dom_info *rdom; - -- if (rdom->async_reset) -+ if (domain >= pi->num_domains) -+ return -EINVAL; -+ -+ rdom = pi->dom_info + domain; -+ if (rdom->async_reset && flags & AUTONOMOUS_RESET) - flags |= ASYNCHRONOUS_RESET; - - ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t); -@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain, - dom->flags = cpu_to_le32(flags); - dom->reset_state = cpu_to_le32(state); - -- if (rdom->async_reset) -+ if (flags & ASYNCHRONOUS_RESET) - ret = ph->xops->do_xfer_with_response(ph, t); - else - ret = ph->xops->do_xfer(ph, t); -diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c -index 8a18930f3eb69..516f4f0069bd2 100644 ---- a/drivers/firmware/efi/libstub/secureboot.c -+++ b/drivers/firmware/efi/libstub/secureboot.c -@@ -14,7 +14,7 @@ - - /* SHIM variables */ - static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; --static const efi_char16_t shim_MokSBState_name[] = L"MokSBState"; -+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT"; - - static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr, - unsigned long *data_size, void *data) -@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void) - - /* - * See if a user has put the shim into insecure mode. If so, and if the -- * variable doesn't have the runtime attribute set, we might as well -- * honor that. -+ * variable doesn't have the non-volatile attribute set, we might as -+ * well honor that. - */ - size = sizeof(moksbstate); - status = get_efi_var(shim_MokSBState_name, &shim_guid, -@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void) - /* If it fails, we don't care why. Default to secure */ - if (status != EFI_SUCCESS) - goto secure_boot_enabled; -- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) -+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1) - return efi_secureboot_mode_disabled; - - secure_boot_enabled: -diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c -index 05ae8bcc9d671..9780f32a9f243 100644 ---- a/drivers/firmware/efi/libstub/x86-stub.c -+++ b/drivers/firmware/efi/libstub/x86-stub.c -@@ -517,6 +517,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, - hdr->ramdisk_image = 0; - hdr->ramdisk_size = 0; - -+ /* -+ * Disregard any setup data that was provided by the bootloader: -+ * setup_data could be pointing anywhere, and we have no way of -+ * authenticating or validating the payload. 
-+ */ -+ hdr->setup_data = 0; -+ - efi_stub_entry(handle, sys_table_arg, boot_params); - /* not reached */ - -diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c -index 312309be0287d..56656fb519f85 100644 ---- a/drivers/gpio/gpio-ixp4xx.c -+++ b/drivers/gpio/gpio-ixp4xx.c -@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d) - __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS); - } - -+static void ixp4xx_gpio_mask_irq(struct irq_data *d) -+{ -+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d); -+ -+ irq_chip_mask_parent(d); -+ gpiochip_disable_irq(gc, d->hwirq); -+} -+ - static void ixp4xx_gpio_irq_unmask(struct irq_data *d) - { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); -@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d) - if (!(g->irq_edge & BIT(d->hwirq))) - ixp4xx_gpio_irq_ack(d); - -+ gpiochip_enable_irq(gc, d->hwirq); - irq_chip_unmask_parent(d); - } - -@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type) - return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); - } - --static struct irq_chip ixp4xx_gpio_irqchip = { -+static const struct irq_chip ixp4xx_gpio_irqchip = { - .name = "IXP4GPIO", - .irq_ack = ixp4xx_gpio_irq_ack, -- .irq_mask = irq_chip_mask_parent, -+ .irq_mask = ixp4xx_gpio_mask_irq, - .irq_unmask = ixp4xx_gpio_irq_unmask, - .irq_set_type = ixp4xx_gpio_irq_set_type, -+ .flags = IRQCHIP_IMMUTABLE, -+ GPIOCHIP_IRQ_RESOURCE_HELPERS, - }; - - static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc, -@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev) - g->gc.owner = THIS_MODULE; - - girq = &g->gc.irq; -- girq->chip = &ixp4xx_gpio_irqchip; -+ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip); - girq->fwnode = g->fwnode; - girq->parent_domain = parent; - girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq; -diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c -index a2e505a7545cd..523dfd17dd922 100644 ---- a/drivers/gpio/gpio-mockup.c -+++ b/drivers/gpio/gpio-mockup.c -@@ -533,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx) - } - - fwnode = fwnode_create_software_node(properties, NULL); -- if (IS_ERR(fwnode)) -+ if (IS_ERR(fwnode)) { -+ kfree_strarray(line_names, ngpio); - return PTR_ERR(fwnode); -+ } - - pdevinfo.name = "gpio-mockup"; - pdevinfo.id = idx; -@@ -597,9 +599,9 @@ static int __init gpio_mockup_init(void) - - static void __exit gpio_mockup_exit(void) - { -+ gpio_mockup_unregister_pdevs(); - debugfs_remove_recursive(gpio_mockup_dbg_dir); - platform_driver_unregister(&gpio_mockup_driver); -- gpio_mockup_unregister_pdevs(); - } - - module_init(gpio_mockup_init); -diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c -index d8a26e503ca5d..f163f5ca857be 100644 ---- a/drivers/gpio/gpio-mt7621.c -+++ b/drivers/gpio/gpio-mt7621.c -@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d) - unsigned long flags; - u32 rise, fall, high, low; - -+ gpiochip_enable_irq(gc, d->hwirq); -+ - spin_lock_irqsave(&rg->lock, flags); - rise = mtk_gpio_r32(rg, GPIO_REG_REDGE); - fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE); -@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d) - mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin)); - mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin)); - spin_unlock_irqrestore(&rg->lock, flags); -+ -+ gpiochip_disable_irq(gc, d->hwirq); - } - - static int -@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip, - return 
gpio % MTK_BANK_WIDTH; - } - -+static const struct irq_chip mt7621_irq_chip = { -+ .name = "mt7621-gpio", -+ .irq_mask_ack = mediatek_gpio_irq_mask, -+ .irq_mask = mediatek_gpio_irq_mask, -+ .irq_unmask = mediatek_gpio_irq_unmask, -+ .irq_set_type = mediatek_gpio_irq_type, -+ .flags = IRQCHIP_IMMUTABLE, -+ GPIOCHIP_IRQ_RESOURCE_HELPERS, -+}; -+ - static int - mediatek_gpio_bank_probe(struct device *dev, int bank) - { -@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank) - return -ENOMEM; - - rg->chip.offset = bank * MTK_BANK_WIDTH; -- rg->irq_chip.name = dev_name(dev); -- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; -- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; -- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; -- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; - - if (mtk->gpio_irq) { - struct gpio_irq_chip *girq; -@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank) - } - - girq = &rg->chip.irq; -- girq->chip = &rg->irq_chip; -+ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip); - /* This will let us handle the parent IRQ in the driver */ - girq->parent_handler = NULL; - girq->num_parents = 0; -diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c -index fa4bc7481f9a6..e739dcea61b23 100644 ---- a/drivers/gpio/gpio-tqmx86.c -+++ b/drivers/gpio/gpio-tqmx86.c -@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) - girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_simple_irq; - girq->init_valid_mask = tqmx86_init_irq_valid_mask; -+ -+ irq_domain_set_pm_device(girq->domain, dev); - } - - ret = devm_gpiochip_add_data(dev, chip, gpio); -@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) - goto out_pm_dis; - } - -- irq_domain_set_pm_device(girq->domain, dev); -- - dev_info(dev, "GPIO functionality initialized with %d pins\n", - chip->ngpio); - -diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c -index b26e643383762..21fee9ed7f0d2 100644 ---- a/drivers/gpio/gpiolib-cdev.c -+++ b/drivers/gpio/gpiolib-cdev.c -@@ -1975,7 +1975,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - ret = -ENODEV; - goto out_free_le; - } -- le->irq = irq; - - if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) - irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 
-@@ -1989,7 +1988,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - init_waitqueue_head(&le->wait); - - /* Request a thread to read the events */ -- ret = request_threaded_irq(le->irq, -+ ret = request_threaded_irq(irq, - lineevent_irq_handler, - lineevent_irq_thread, - irqflags, -@@ -1998,6 +1997,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) - if (ret) - goto out_free_le; - -+ le->irq = irq; -+ - fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC); - if (fd < 0) { - ret = fd; -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -index 4dfd6724b3caa..0a8c15c3a04c3 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c -@@ -35,6 +35,8 @@ - #include - #include - #include -+#include -+#include - #include - #include - #include -@@ -495,6 +497,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { - .create_handle = drm_gem_fb_create_handle, - }; - -+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = { -+ .destroy = drm_gem_fb_destroy, -+ .create_handle = drm_gem_fb_create_handle, -+ .dirty = drm_atomic_helper_dirtyfb, -+}; -+ - uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, - uint64_t bo_flags) - { -@@ -1069,7 +1077,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev, - if (ret) - goto err; - -- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); -+ if (drm_drv_uses_atomic_modeset(dev)) -+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic); -+ else -+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); - if (ret) - goto err; - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -index b19bf0c3f3737..79ce654bd3dad 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -@@ -748,7 +748,7 @@ static int psp_tmr_init(struct psp_context *psp) - } - - pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; -- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev), -+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, - AMDGPU_GEM_DOMAIN_VRAM, - &psp->tmr_bo, &psp->tmr_mc_addr, pptr); - -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h -index e431f49949319..cd366c7f311fd 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h -@@ -36,6 +36,7 @@ - #define PSP_CMD_BUFFER_SIZE 0x1000 - #define PSP_1_MEG 0x100000 - #define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 
0x800000 : 0x400000) -+#define PSP_TMR_ALIGNMENT 0x100000 - #define PSP_FW_NAME_LEN 0x24 - - enum psp_shared_mem_size { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c -index dac202ae864dd..9193ca5d6fe7a 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c -@@ -1805,7 +1805,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) - amdgpu_ras_query_error_status(adev, &info); - - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && -- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { -+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) && -+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) { - if (amdgpu_ras_reset_error_status(adev, info.head.block)) - dev_warn(adev->dev, "Failed to reset error counter and error status"); - } -diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c -index cdc0c97798483..6c1fd471a4c7d 100644 ---- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c -+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c -@@ -28,6 +28,14 @@ - #include "nbio/nbio_7_7_0_sh_mask.h" - #include - -+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev) -+{ -+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, -+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); -+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL, -+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); -+} -+ - static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev) - { - u32 tmp; -@@ -237,4 +245,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = { - .ih_doorbell_range = nbio_v7_7_ih_doorbell_range, - .ih_control = nbio_v7_7_ih_control, - .init_registers = nbio_v7_7_init_registers, -+ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers, - }; -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c -index f47d82da115c9..42a567e71439b 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c -@@ -6651,8 +6651,7 @@ static double CalculateUrgentLatency( - return ret; - } - -- --static void UseMinimumDCFCLK( -+static noinline_for_stack void UseMinimumDCFCLK( - struct display_mode_lib *mode_lib, - int MaxInterDCNTileRepeaters, - int MaxPrefetchMode, -diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c -index e4b9fd31223c9..40a672236198e 100644 ---- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c -+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c -@@ -261,33 +261,13 @@ static void CalculateRowBandwidth( - - static void CalculateFlipSchedule( - struct display_mode_lib *mode_lib, -+ unsigned int k, - double HostVMInefficiencyFactor, - double UrgentExtraLatency, - double UrgentLatency, -- unsigned int GPUVMMaxPageTableLevels, -- bool HostVMEnable, -- unsigned int HostVMMaxNonCachedPageTableLevels, -- bool GPUVMEnable, -- double HostVMMinPageSize, - double PDEAndMetaPTEBytesPerFrame, - double MetaRowBytes, -- double DPTEBytesPerRow, -- double BandwidthAvailableForImmediateFlip, -- unsigned int TotImmediateFlipBytes, -- enum source_format_class SourcePixelFormat, -- double LineTime, -- double VRatio, -- double VRatioChroma, -- double Tno_bw, -- bool DCCEnable, -- unsigned int dpte_row_height, -- unsigned int 
meta_row_height, -- unsigned int dpte_row_height_chroma, -- unsigned int meta_row_height_chroma, -- double *DestinationLinesToRequestVMInImmediateFlip, -- double *DestinationLinesToRequestRowInImmediateFlip, -- double *final_flip_bw, -- bool *ImmediateFlipSupportedForPipe); -+ double DPTEBytesPerRow); - static double CalculateWriteBackDelay( - enum source_format_class WritebackPixelFormat, - double WritebackHRatio, -@@ -321,64 +301,28 @@ static void CalculateVupdateAndDynamicMetadataParameters( - static void CalculateWatermarksAndDRAMSpeedChangeSupport( - struct display_mode_lib *mode_lib, - unsigned int PrefetchMode, -- unsigned int NumberOfActivePlanes, -- unsigned int MaxLineBufferLines, -- unsigned int LineBufferSize, -- unsigned int WritebackInterfaceBufferSize, - double DCFCLK, - double ReturnBW, -- bool SynchronizedVBlank, -- unsigned int dpte_group_bytes[], -- unsigned int MetaChunkSize, - double UrgentLatency, - double ExtraLatency, -- double WritebackLatency, -- double WritebackChunkSize, - double SOCCLK, -- double DRAMClockChangeLatency, -- double SRExitTime, -- double SREnterPlusExitTime, -- double SRExitZ8Time, -- double SREnterPlusExitZ8Time, - double DCFCLKDeepSleep, - unsigned int DETBufferSizeY[], - unsigned int DETBufferSizeC[], - unsigned int SwathHeightY[], - unsigned int SwathHeightC[], -- unsigned int LBBitPerPixel[], - double SwathWidthY[], - double SwathWidthC[], -- double HRatio[], -- double HRatioChroma[], -- unsigned int vtaps[], -- unsigned int VTAPsChroma[], -- double VRatio[], -- double VRatioChroma[], -- unsigned int HTotal[], -- double PixelClock[], -- unsigned int BlendingAndTiming[], - unsigned int DPPPerPlane[], - double BytePerPixelDETY[], - double BytePerPixelDETC[], -- double DSTXAfterScaler[], -- double DSTYAfterScaler[], -- bool WritebackEnable[], -- enum source_format_class WritebackPixelFormat[], -- double WritebackDestinationWidth[], -- double WritebackDestinationHeight[], -- double WritebackSourceHeight[], - bool UnboundedRequestEnabled, - int unsigned CompressedBufferSizeInkByte, - enum clock_change_support *DRAMClockChangeSupport, -- double *UrgentWatermark, -- double *WritebackUrgentWatermark, -- double *DRAMClockChangeWatermark, -- double *WritebackDRAMClockChangeWatermark, - double *StutterExitWatermark, - double *StutterEnterPlusExitWatermark, - double *Z8StutterExitWatermark, -- double *Z8StutterEnterPlusExitWatermark, -- double *MinActiveDRAMClockChangeLatencySupported); -+ double *Z8StutterEnterPlusExitWatermark); - - static void CalculateDCFCLKDeepSleep( - struct display_mode_lib *mode_lib, -@@ -2914,33 +2858,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman - for (k = 0; k < v->NumberOfActivePlanes; ++k) { - CalculateFlipSchedule( - mode_lib, -+ k, - HostVMInefficiencyFactor, - v->UrgentExtraLatency, - v->UrgentLatency, -- v->GPUVMMaxPageTableLevels, -- v->HostVMEnable, -- v->HostVMMaxNonCachedPageTableLevels, -- v->GPUVMEnable, -- v->HostVMMinPageSize, - v->PDEAndMetaPTEBytesFrame[k], - v->MetaRowByte[k], -- v->PixelPTEBytesPerRow[k], -- v->BandwidthAvailableForImmediateFlip, -- v->TotImmediateFlipBytes, -- v->SourcePixelFormat[k], -- v->HTotal[k] / v->PixelClock[k], -- v->VRatio[k], -- v->VRatioChroma[k], -- v->Tno_bw[k], -- v->DCCEnable[k], -- v->dpte_row_height[k], -- v->meta_row_height[k], -- v->dpte_row_height_chroma[k], -- v->meta_row_height_chroma[k], -- &v->DestinationLinesToRequestVMInImmediateFlip[k], -- &v->DestinationLinesToRequestRowInImmediateFlip[k], -- &v->final_flip_bw[k], -- 
&v->ImmediateFlipSupportedForPipe[k]); -+ v->PixelPTEBytesPerRow[k]); - } - - v->total_dcn_read_bw_with_flip = 0.0; -@@ -3027,64 +2951,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman - CalculateWatermarksAndDRAMSpeedChangeSupport( - mode_lib, - PrefetchMode, -- v->NumberOfActivePlanes, -- v->MaxLineBufferLines, -- v->LineBufferSize, -- v->WritebackInterfaceBufferSize, - v->DCFCLK, - v->ReturnBW, -- v->SynchronizedVBlank, -- v->dpte_group_bytes, -- v->MetaChunkSize, - v->UrgentLatency, - v->UrgentExtraLatency, -- v->WritebackLatency, -- v->WritebackChunkSize, - v->SOCCLK, -- v->DRAMClockChangeLatency, -- v->SRExitTime, -- v->SREnterPlusExitTime, -- v->SRExitZ8Time, -- v->SREnterPlusExitZ8Time, - v->DCFCLKDeepSleep, - v->DETBufferSizeY, - v->DETBufferSizeC, - v->SwathHeightY, - v->SwathHeightC, -- v->LBBitPerPixel, - v->SwathWidthY, - v->SwathWidthC, -- v->HRatio, -- v->HRatioChroma, -- v->vtaps, -- v->VTAPsChroma, -- v->VRatio, -- v->VRatioChroma, -- v->HTotal, -- v->PixelClock, -- v->BlendingAndTiming, - v->DPPPerPlane, - v->BytePerPixelDETY, - v->BytePerPixelDETC, -- v->DSTXAfterScaler, -- v->DSTYAfterScaler, -- v->WritebackEnable, -- v->WritebackPixelFormat, -- v->WritebackDestinationWidth, -- v->WritebackDestinationHeight, -- v->WritebackSourceHeight, - v->UnboundedRequestEnabled, - v->CompressedBufferSizeInkByte, - &DRAMClockChangeSupport, -- &v->UrgentWatermark, -- &v->WritebackUrgentWatermark, -- &v->DRAMClockChangeWatermark, -- &v->WritebackDRAMClockChangeWatermark, - &v->StutterExitWatermark, - &v->StutterEnterPlusExitWatermark, - &v->Z8StutterExitWatermark, -- &v->Z8StutterEnterPlusExitWatermark, -- &v->MinActiveDRAMClockChangeLatencySupported); -+ &v->Z8StutterEnterPlusExitWatermark); - - for (k = 0; k < v->NumberOfActivePlanes; ++k) { - if (v->WritebackEnable[k] == true) { -@@ -3696,61 +3584,43 @@ static void CalculateRowBandwidth( - - static void CalculateFlipSchedule( - struct display_mode_lib *mode_lib, -+ unsigned int k, - double HostVMInefficiencyFactor, - double UrgentExtraLatency, - double UrgentLatency, -- unsigned int GPUVMMaxPageTableLevels, -- bool HostVMEnable, -- unsigned int HostVMMaxNonCachedPageTableLevels, -- bool GPUVMEnable, -- double HostVMMinPageSize, - double PDEAndMetaPTEBytesPerFrame, - double MetaRowBytes, -- double DPTEBytesPerRow, -- double BandwidthAvailableForImmediateFlip, -- unsigned int TotImmediateFlipBytes, -- enum source_format_class SourcePixelFormat, -- double LineTime, -- double VRatio, -- double VRatioChroma, -- double Tno_bw, -- bool DCCEnable, -- unsigned int dpte_row_height, -- unsigned int meta_row_height, -- unsigned int dpte_row_height_chroma, -- unsigned int meta_row_height_chroma, -- double *DestinationLinesToRequestVMInImmediateFlip, -- double *DestinationLinesToRequestRowInImmediateFlip, -- double *final_flip_bw, -- bool *ImmediateFlipSupportedForPipe) -+ double DPTEBytesPerRow) - { -+ struct vba_vars_st *v = &mode_lib->vba; - double min_row_time = 0.0; - unsigned int HostVMDynamicLevelsTrips; - double TimeForFetchingMetaPTEImmediateFlip; - double TimeForFetchingRowInVBlankImmediateFlip; - double ImmediateFlipBW; -+ double LineTime = v->HTotal[k] / v->PixelClock[k]; - -- if (GPUVMEnable == true && HostVMEnable == true) { -- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; -+ if (v->GPUVMEnable == true && v->HostVMEnable == true) { -+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels; - } else { - HostVMDynamicLevelsTrips = 0; - } - -- if (GPUVMEnable == true || 
DCCEnable == true) { -- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes; -+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) { -+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes; - } - -- if (GPUVMEnable == true) { -+ if (v->GPUVMEnable == true) { - TimeForFetchingMetaPTEImmediateFlip = dml_max3( -- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, -- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), -+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW, -+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), - LineTime / 4.0); - } else { - TimeForFetchingMetaPTEImmediateFlip = 0; - } - -- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; -- if ((GPUVMEnable == true || DCCEnable == true)) { -+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0; -+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { - TimeForFetchingRowInVBlankImmediateFlip = dml_max3( - (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW, - UrgentLatency * (HostVMDynamicLevelsTrips + 1), -@@ -3759,54 +3629,54 @@ static void CalculateFlipSchedule( - TimeForFetchingRowInVBlankImmediateFlip = 0; - } - -- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; -+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0; - -- if (GPUVMEnable == true) { -- *final_flip_bw = dml_max( -- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), -- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime)); -- } else if ((GPUVMEnable == true || DCCEnable == true)) { -- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime); -+ if (v->GPUVMEnable == true) { -+ v->final_flip_bw[k] = dml_max( -+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime), -+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime)); -+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) { -+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime); - } else { -- *final_flip_bw = 0; -+ v->final_flip_bw[k] = 0; - } - -- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) { -- if (GPUVMEnable == true && DCCEnable != true) { -- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma); -- } else if (GPUVMEnable != true && DCCEnable == true) { -- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma); -+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || 
v->SourcePixelFormat[k] == dm_rgbe_alpha) { -+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { -+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); -+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { -+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); - } else { - min_row_time = dml_min4( -- dpte_row_height * LineTime / VRatio, -- meta_row_height * LineTime / VRatio, -- dpte_row_height_chroma * LineTime / VRatioChroma, -- meta_row_height_chroma * LineTime / VRatioChroma); -+ v->dpte_row_height[k] * LineTime / v->VRatio[k], -+ v->meta_row_height[k] * LineTime / v->VRatio[k], -+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k], -+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]); - } - } else { -- if (GPUVMEnable == true && DCCEnable != true) { -- min_row_time = dpte_row_height * LineTime / VRatio; -- } else if (GPUVMEnable != true && DCCEnable == true) { -- min_row_time = meta_row_height * LineTime / VRatio; -+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) { -+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k]; -+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) { -+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k]; - } else { -- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio); -+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]); - } - } - -- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16 -+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16 - || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) { -- *ImmediateFlipSupportedForPipe = false; -+ v->ImmediateFlipSupportedForPipe[k] = false; - } else { -- *ImmediateFlipSupportedForPipe = true; -+ v->ImmediateFlipSupportedForPipe[k] = true; - } - - #ifdef __DML_VBA_DEBUG__ -- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip); -- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip); -+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]); -+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]); - dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip); - dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip); - dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time); -- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe); -+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]); - #endif - - } -@@ -5397,33 +5267,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - for (k = 0; k < v->NumberOfActivePlanes; k++) { - CalculateFlipSchedule( - mode_lib, -+ k, - HostVMInefficiencyFactor, - v->ExtraLatency, - v->UrgLatency[i], -- 
v->GPUVMMaxPageTableLevels, -- v->HostVMEnable, -- v->HostVMMaxNonCachedPageTableLevels, -- v->GPUVMEnable, -- v->HostVMMinPageSize, - v->PDEAndMetaPTEBytesPerFrame[i][j][k], - v->MetaRowBytes[i][j][k], -- v->DPTEBytesPerRow[i][j][k], -- v->BandwidthAvailableForImmediateFlip, -- v->TotImmediateFlipBytes, -- v->SourcePixelFormat[k], -- v->HTotal[k] / v->PixelClock[k], -- v->VRatio[k], -- v->VRatioChroma[k], -- v->Tno_bw[k], -- v->DCCEnable[k], -- v->dpte_row_height[k], -- v->meta_row_height[k], -- v->dpte_row_height_chroma[k], -- v->meta_row_height_chroma[k], -- &v->DestinationLinesToRequestVMInImmediateFlip[k], -- &v->DestinationLinesToRequestRowInImmediateFlip[k], -- &v->final_flip_bw[k], -- &v->ImmediateFlipSupportedForPipe[k]); -+ v->DPTEBytesPerRow[i][j][k]); - } - v->total_dcn_read_bw_with_flip = 0.0; - for (k = 0; k < v->NumberOfActivePlanes; k++) { -@@ -5481,64 +5331,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - CalculateWatermarksAndDRAMSpeedChangeSupport( - mode_lib, - v->PrefetchModePerState[i][j], -- v->NumberOfActivePlanes, -- v->MaxLineBufferLines, -- v->LineBufferSize, -- v->WritebackInterfaceBufferSize, - v->DCFCLKState[i][j], - v->ReturnBWPerState[i][j], -- v->SynchronizedVBlank, -- v->dpte_group_bytes, -- v->MetaChunkSize, - v->UrgLatency[i], - v->ExtraLatency, -- v->WritebackLatency, -- v->WritebackChunkSize, - v->SOCCLKPerState[i], -- v->DRAMClockChangeLatency, -- v->SRExitTime, -- v->SREnterPlusExitTime, -- v->SRExitZ8Time, -- v->SREnterPlusExitZ8Time, - v->ProjectedDCFCLKDeepSleep[i][j], - v->DETBufferSizeYThisState, - v->DETBufferSizeCThisState, - v->SwathHeightYThisState, - v->SwathHeightCThisState, -- v->LBBitPerPixel, - v->SwathWidthYThisState, - v->SwathWidthCThisState, -- v->HRatio, -- v->HRatioChroma, -- v->vtaps, -- v->VTAPsChroma, -- v->VRatio, -- v->VRatioChroma, -- v->HTotal, -- v->PixelClock, -- v->BlendingAndTiming, - v->NoOfDPPThisState, - v->BytePerPixelInDETY, - v->BytePerPixelInDETC, -- v->DSTXAfterScaler, -- v->DSTYAfterScaler, -- v->WritebackEnable, -- v->WritebackPixelFormat, -- v->WritebackDestinationWidth, -- v->WritebackDestinationHeight, -- v->WritebackSourceHeight, - UnboundedRequestEnabledThisState, - CompressedBufferSizeInkByteThisState, - &v->DRAMClockChangeSupport[i][j], -- &v->UrgentWatermark, -- &v->WritebackUrgentWatermark, -- &v->DRAMClockChangeWatermark, -- &v->WritebackDRAMClockChangeWatermark, -- &dummy, - &dummy, - &dummy, - &dummy, -- &v->MinActiveDRAMClockChangeLatencySupported); -+ &dummy); - } - } - -@@ -5663,64 +5477,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l - static void CalculateWatermarksAndDRAMSpeedChangeSupport( - struct display_mode_lib *mode_lib, - unsigned int PrefetchMode, -- unsigned int NumberOfActivePlanes, -- unsigned int MaxLineBufferLines, -- unsigned int LineBufferSize, -- unsigned int WritebackInterfaceBufferSize, - double DCFCLK, - double ReturnBW, -- bool SynchronizedVBlank, -- unsigned int dpte_group_bytes[], -- unsigned int MetaChunkSize, - double UrgentLatency, - double ExtraLatency, -- double WritebackLatency, -- double WritebackChunkSize, - double SOCCLK, -- double DRAMClockChangeLatency, -- double SRExitTime, -- double SREnterPlusExitTime, -- double SRExitZ8Time, -- double SREnterPlusExitZ8Time, - double DCFCLKDeepSleep, - unsigned int DETBufferSizeY[], - unsigned int DETBufferSizeC[], - unsigned int SwathHeightY[], - unsigned int SwathHeightC[], -- unsigned int LBBitPerPixel[], - double SwathWidthY[], - double 
SwathWidthC[], -- double HRatio[], -- double HRatioChroma[], -- unsigned int vtaps[], -- unsigned int VTAPsChroma[], -- double VRatio[], -- double VRatioChroma[], -- unsigned int HTotal[], -- double PixelClock[], -- unsigned int BlendingAndTiming[], - unsigned int DPPPerPlane[], - double BytePerPixelDETY[], - double BytePerPixelDETC[], -- double DSTXAfterScaler[], -- double DSTYAfterScaler[], -- bool WritebackEnable[], -- enum source_format_class WritebackPixelFormat[], -- double WritebackDestinationWidth[], -- double WritebackDestinationHeight[], -- double WritebackSourceHeight[], - bool UnboundedRequestEnabled, - int unsigned CompressedBufferSizeInkByte, - enum clock_change_support *DRAMClockChangeSupport, -- double *UrgentWatermark, -- double *WritebackUrgentWatermark, -- double *DRAMClockChangeWatermark, -- double *WritebackDRAMClockChangeWatermark, - double *StutterExitWatermark, - double *StutterEnterPlusExitWatermark, - double *Z8StutterExitWatermark, -- double *Z8StutterEnterPlusExitWatermark, -- double *MinActiveDRAMClockChangeLatencySupported) -+ double *Z8StutterEnterPlusExitWatermark) - { - struct vba_vars_st *v = &mode_lib->vba; - double EffectiveLBLatencyHidingY; -@@ -5740,103 +5518,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - double TotalPixelBW = 0.0; - int k, j; - -- *UrgentWatermark = UrgentLatency + ExtraLatency; -+ v->UrgentWatermark = UrgentLatency + ExtraLatency; - - #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency); - dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency); -- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark); -+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark); - #endif - -- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark; -+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark; - - #ifdef __DML_VBA_DEBUG__ -- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency); -- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark); -+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency); -+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark); - #endif - - v->TotalActiveWriteback = 0; -- for (k = 0; k < NumberOfActivePlanes; ++k) { -- if (WritebackEnable[k] == true) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { -+ if (v->WritebackEnable[k] == true) { - v->TotalActiveWriteback = v->TotalActiveWriteback + 1; - } - } - - if (v->TotalActiveWriteback <= 1) { -- *WritebackUrgentWatermark = WritebackLatency; -+ v->WritebackUrgentWatermark = v->WritebackLatency; - } else { -- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; -+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; - } - - if (v->TotalActiveWriteback <= 1) { -- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency; -+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency; - } else { -- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; -+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK; - } - -- for (k = 0; k < NumberOfActivePlanes; ++k) { -+ for (k = 0; k < 
v->NumberOfActivePlanes; ++k) { - TotalPixelBW = TotalPixelBW -- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) -- / (HTotal[k] / PixelClock[k]); -+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) -+ / (v->HTotal[k] / v->PixelClock[k]); - } - -- for (k = 0; k < NumberOfActivePlanes; ++k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { - double EffectiveDETBufferSizeY = DETBufferSizeY[k]; - - v->LBLatencyHidingSourceLinesY = dml_min( -- (double) MaxLineBufferLines, -- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1); -+ (double) v->MaxLineBufferLines, -+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1); - - v->LBLatencyHidingSourceLinesC = dml_min( -- (double) MaxLineBufferLines, -- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1); -+ (double) v->MaxLineBufferLines, -+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1); - -- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]); -+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]); - -- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]); -+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]); - - if (UnboundedRequestEnabled) { - EffectiveDETBufferSizeY = EffectiveDETBufferSizeY -- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW; -+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW; - } - - LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; - LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]); -- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k]; -+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k]; - if (BytePerPixelDETC[k] > 0) { - LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k]; - LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]); -- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k]; -+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k]; - } else { - LinesInDETC = 0; - FullDETBufferingTimeC = 999999; - } - - ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY -- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; -+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - -- if (NumberOfActivePlanes > 1) { -+ if (v->NumberOfActivePlanes > 1) { - ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY -- - (1 - 1.0 / 
NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k]; -+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; - } - - if (BytePerPixelDETC[k] > 0) { - ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC -- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark; -+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark; - -- if (NumberOfActivePlanes > 1) { -+ if (v->NumberOfActivePlanes > 1) { - ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC -- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k]; -+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k]; - } - v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC); - } else { - v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY; - } - -- if (WritebackEnable[k] == true) { -- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 -- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4); -- if (WritebackPixelFormat[k] == dm_444_64) { -+ if (v->WritebackEnable[k] == true) { -+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 -+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4); -+ if (v->WritebackPixelFormat[k] == dm_444_64) { - WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2; - } - WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark; -@@ -5846,14 +5624,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - - v->MinActiveDRAMClockChangeMargin = 999999; - PlaneWithMinActiveDRAMClockChangeMargin = 0; -- for (k = 0; k < NumberOfActivePlanes; ++k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { - if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) { - v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k]; -- if (BlendingAndTiming[k] == k) { -+ if (v->BlendingAndTiming[k] == k) { - PlaneWithMinActiveDRAMClockChangeMargin = k; - } else { -- for (j = 0; j < NumberOfActivePlanes; ++j) { -- if (BlendingAndTiming[k] == j) { -+ for (j = 0; j < v->NumberOfActivePlanes; ++j) { -+ if (v->BlendingAndTiming[k] == j) { - PlaneWithMinActiveDRAMClockChangeMargin = j; - } - } -@@ -5861,11 +5639,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - } - } - -- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency; -+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ; - - SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999; -- for (k = 0; k < NumberOfActivePlanes; ++k) { -- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { -+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && 
(v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) - && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) { - SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k]; - } -@@ -5873,25 +5651,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( - - v->TotalNumberOfActiveOTG = 0; - -- for (k = 0; k < NumberOfActivePlanes; ++k) { -- if (BlendingAndTiming[k] == k) { -+ for (k = 0; k < v->NumberOfActivePlanes; ++k) { -+ if (v->BlendingAndTiming[k] == k) { - v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1; - } - } - - if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) { - *DRAMClockChangeSupport = dm_dram_clock_change_vactive; -- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 -+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 - || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) { - *DRAMClockChangeSupport = dm_dram_clock_change_vblank; - } else { - *DRAMClockChangeSupport = dm_dram_clock_change_unsupported; - } - -- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; -- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); -- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; -- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; -+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep; -+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep); -+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; -+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep; - - #ifdef __DML_VBA_DEBUG__ - dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark); -diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c -index 64a38f08f4974..5a51be753e87f 100644 ---- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c -+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c -@@ -1603,6 +1603,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, - struct fixed31_32 lut2; - struct fixed31_32 delta_lut; - struct fixed31_32 delta_index; -+ const struct fixed31_32 one = dc_fixpt_from_int(1); - - i = 0; - /* fixed_pt library has problems handling too small values */ -@@ -1631,6 +1632,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, - } else - hw_x = coordinates_x[i].x; - -+ if (dc_fixpt_le(one, hw_x)) -+ hw_x = one; -+ - norm_x = dc_fixpt_mul(norm_factor, hw_x); - index = dc_fixpt_floor(norm_x); - if (index < 0 || index > 255) -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -index 32bb6b1d95261..d13e455c8827e 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) - smu_baco->platform_support = - (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : - false; -+ -+ /* -+ * Disable BACO entry/exit completely on below SKUs to -+ * avoid hardware intermittent failures. 
-+ */ -+ if (((adev->pdev->device == 0x73A1) && -+ (adev->pdev->revision == 0x00)) || -+ ((adev->pdev->device == 0x73BF) && -+ (adev->pdev->revision == 0xCF))) -+ smu_baco->platform_support = false; -+ - } - } - -diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c -index dd32b484dd825..ce96234f3df20 100644 ---- a/drivers/gpu/drm/gma500/cdv_device.c -+++ b/drivers/gpu/drm/gma500/cdv_device.c -@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = { - static int cdv_chip_setup(struct drm_device *dev) - { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); -- struct pci_dev *pdev = to_pci_dev(dev->dev); - INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func); - -- if (pci_enable_msi(pdev)) -- dev_warn(dev->dev, "Enabling MSI failed!\n"); -+ dev_priv->use_msi = true; - dev_priv->regmap = cdv_regmap; - gma_get_core_freq(dev); - psb_intel_opregion_init(dev); -diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c -index dffe37490206d..4b7627a726378 100644 ---- a/drivers/gpu/drm/gma500/gem.c -+++ b/drivers/gpu/drm/gma500/gem.c -@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj) - { - struct psb_gem_object *pobj = to_psb_gem_object(obj); - -- drm_gem_object_release(obj); -- - /* Undo the mmap pin if we are destroying the object */ - if (pobj->mmapping) - psb_gem_unpin(pobj); - -+ drm_gem_object_release(obj); -+ - WARN_ON(pobj->in_gart && !pobj->stolen); - - release_resource(&pobj->resource); -diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c -index 34ec3fca09ba6..12287c9bb4d80 100644 ---- a/drivers/gpu/drm/gma500/gma_display.c -+++ b/drivers/gpu/drm/gma500/gma_display.c -@@ -531,15 +531,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc, - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - - gma_crtc->page_flip_event = event; -+ spin_unlock_irqrestore(&dev->event_lock, flags); - - /* Call this locked if we want an event at vblank interrupt. 
*/ - ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); - if (ret) { -- gma_crtc->page_flip_event = NULL; -- drm_crtc_vblank_put(crtc); -+ spin_lock_irqsave(&dev->event_lock, flags); -+ if (gma_crtc->page_flip_event) { -+ gma_crtc->page_flip_event = NULL; -+ drm_crtc_vblank_put(crtc); -+ } -+ spin_unlock_irqrestore(&dev->event_lock, flags); - } -- -- spin_unlock_irqrestore(&dev->event_lock, flags); - } else { - ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); - } -diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c -index 5923a9c893122..f90e628cb482c 100644 ---- a/drivers/gpu/drm/gma500/oaktrail_device.c -+++ b/drivers/gpu/drm/gma500/oaktrail_device.c -@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = { - static int oaktrail_chip_setup(struct drm_device *dev) - { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); -- struct pci_dev *pdev = to_pci_dev(dev->dev); - int ret; - -- if (pci_enable_msi(pdev)) -- dev_warn(dev->dev, "Enabling MSI failed!\n"); -- -+ dev_priv->use_msi = true; - dev_priv->regmap = oaktrail_regmap; - - ret = mid_chip_setup(dev); -diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c -index b91de6d36e412..66873085d4505 100644 ---- a/drivers/gpu/drm/gma500/power.c -+++ b/drivers/gpu/drm/gma500/power.c -@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev) - dev_priv->regs.saveBSM = bsm; - pci_read_config_dword(pdev, 0xFC, &vbt); - dev_priv->regs.saveVBT = vbt; -- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); -- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); - - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); -@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev) - pci_restore_state(pdev); - pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM); - pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT); -- /* restoring MSI address and data in PCIx space */ -- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); -- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data); - ret = pci_enable_device(pdev); - - if (ret != 0) -@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev) - mutex_lock(&power_mutex); - gma_resume_pci(pdev); - gma_resume_display(pdev); -- gma_irq_preinstall(dev); -- gma_irq_postinstall(dev); -+ gma_irq_install(dev); - mutex_unlock(&power_mutex); - return 0; - } -diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c -index 1d8744f3e7020..54e756b486060 100644 ---- a/drivers/gpu/drm/gma500/psb_drv.c -+++ b/drivers/gpu/drm/gma500/psb_drv.c -@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) - PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - -- gma_irq_install(dev, pdev->irq); -+ gma_irq_install(dev); - - dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - -diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h -index 0ddfec1a0851d..4c3fc5eaf6ad5 100644 ---- a/drivers/gpu/drm/gma500/psb_drv.h -+++ b/drivers/gpu/drm/gma500/psb_drv.h -@@ -490,6 +490,7 @@ struct drm_psb_private { - int rpm_enabled; - - /* MID specific */ -+ bool use_msi; - bool has_gct; - struct oaktrail_gct_data gct_data; - -@@ -499,10 +500,6 @@ struct drm_psb_private { - /* Register state */ - struct psb_save_area regs; - -- /* MSI reg save */ -- 
uint32_t msi_addr; -- uint32_t msi_data; -- - /* Hotplug handling */ - struct work_struct hotplug_work; - -diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c -index e6e6d61bbeab6..038f18ed0a95e 100644 ---- a/drivers/gpu/drm/gma500/psb_irq.c -+++ b/drivers/gpu/drm/gma500/psb_irq.c -@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev) - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - } - --int gma_irq_install(struct drm_device *dev, unsigned int irq) -+int gma_irq_install(struct drm_device *dev) - { -+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev); -+ struct pci_dev *pdev = to_pci_dev(dev->dev); - int ret; - -- if (irq == IRQ_NOTCONNECTED) -+ if (dev_priv->use_msi && pci_enable_msi(pdev)) { -+ dev_warn(dev->dev, "Enabling MSI failed!\n"); -+ dev_priv->use_msi = false; -+ } -+ -+ if (pdev->irq == IRQ_NOTCONNECTED) - return -ENOTCONN; - - gma_irq_preinstall(dev); - - /* PCI devices require shared interrupts. */ -- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev); -+ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev); - if (ret) - return ret; - -@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev) - spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - - free_irq(pdev->irq, dev); -+ if (dev_priv->use_msi) -+ pci_disable_msi(pdev); - } - - int gma_crtc_enable_vblank(struct drm_crtc *crtc) -diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h -index b51e395194fff..7648f69824a5d 100644 ---- a/drivers/gpu/drm/gma500/psb_irq.h -+++ b/drivers/gpu/drm/gma500/psb_irq.h -@@ -17,7 +17,7 @@ struct drm_device; - - void gma_irq_preinstall(struct drm_device *dev); - void gma_irq_postinstall(struct drm_device *dev); --int gma_irq_install(struct drm_device *dev, unsigned int irq); -+int gma_irq_install(struct drm_device *dev); - void gma_irq_uninstall(struct drm_device *dev); - - int gma_crtc_enable_vblank(struct drm_crtc *crtc); -diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig -index 073adfe438ddd..4e41c144a2902 100644 ---- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig -+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig -@@ -2,6 +2,7 @@ - config DRM_HISI_HIBMC - tristate "DRM Support for Hisilicon Hibmc" - depends on DRM && PCI && (ARM64 || COMPILE_TEST) -+ depends on MMU - select DRM_KMS_HELPER - select DRM_VRAM_HELPER - select DRM_TTM -diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c -index 5a957acebfd62..82ad8fe7440c0 100644 ---- a/drivers/gpu/drm/i915/display/g4x_dp.c -+++ b/drivers/gpu/drm/i915/display/g4x_dp.c -@@ -395,26 +395,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, - intel_dotclock_calculate(pipe_config->port_clock, - &pipe_config->dp_m_n); - -- if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && -- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { -- /* -- * This is a big fat ugly hack. -- * -- * Some machines in UEFI boot mode provide us a VBT that has 18 -- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons -- * unknown we fail to light up. Yet the same BIOS boots up with -- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as -- * max, not what it tells us to use. -- * -- * Note: This will still be broken if the eDP panel is not lit -- * up by the BIOS, and thus we can't get the mode at module -- * load. 
-- */ -- drm_dbg_kms(&dev_priv->drm, -- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", -- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); -- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; -- } -+ if (intel_dp_is_edp(intel_dp)) -+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp); - } - - static void -diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c -index 5508ebb9eb434..f416499dad6f3 100644 ---- a/drivers/gpu/drm/i915/display/icl_dsi.c -+++ b/drivers/gpu/drm/i915/display/icl_dsi.c -@@ -1864,7 +1864,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) - { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); -- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; -+ struct intel_connector *connector = intel_dsi->attached_connector; -+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; - u32 tlpx_ns; - u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; - u32 ths_prepare_ns, tclk_trail_ns; -@@ -2051,6 +2052,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) - /* attach connector to encoder */ - intel_connector_attach_encoder(intel_connector, encoder); - -+ intel_bios_init_panel(dev_priv, &intel_connector->panel); -+ - mutex_lock(&dev->mode_config.mutex); - intel_panel_add_vbt_lfp_fixed_mode(intel_connector); - mutex_unlock(&dev->mode_config.mutex); -@@ -2064,13 +2067,20 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) - - intel_backlight_setup(intel_connector, INVALID_PIPE); - -- if (dev_priv->vbt.dsi.config->dual_link) -+ if (intel_connector->panel.vbt.dsi.config->dual_link) - intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); - else - intel_dsi->ports = BIT(port); - -- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports; -- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports; -+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) -+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; -+ -+ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports; -+ -+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) -+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; -+ -+ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports; - - for_each_dsi_port(port, intel_dsi->ports) { - struct intel_dsi_host *host; -diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c -index 3e200a2e4ba29..5182bb66bd289 100644 ---- a/drivers/gpu/drm/i915/display/intel_backlight.c -+++ b/drivers/gpu/drm/i915/display/intel_backlight.c -@@ -1158,9 +1158,10 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) - return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); - } - --static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv) -+static u16 get_vbt_pwm_freq(struct intel_connector *connector) - { -- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; -+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev); -+ u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz; - - if (pwm_freq_hz) { - drm_dbg_kms(&dev_priv->drm, -@@ -1180,7 +1181,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) - { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; -- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); -+ u16 pwm_freq_hz = 
get_vbt_pwm_freq(connector); - u32 pwm; - - if (!panel->backlight.pwm_funcs->hz_to_pwm) { -@@ -1217,11 +1218,11 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) - * against this by letting the minimum be at most (arbitrarily chosen) - * 25% of the max. - */ -- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); -- if (min != dev_priv->vbt.backlight.min_brightness) { -+ min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64); -+ if (min != connector->panel.vbt.backlight.min_brightness) { - drm_dbg_kms(&dev_priv->drm, - "clamping VBT min backlight %d/255 to %d/255\n", -- dev_priv->vbt.backlight.min_brightness, min); -+ connector->panel.vbt.backlight.min_brightness, min); - } - - /* vbt value is a coefficient in range [0..255] */ -@@ -1410,7 +1411,7 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) - struct intel_panel *panel = &connector->panel; - u32 pwm_ctl, val; - -- panel->backlight.controller = dev_priv->vbt.backlight.controller; -+ panel->backlight.controller = connector->panel.vbt.backlight.controller; - - pwm_ctl = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); -@@ -1483,7 +1484,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, - u32 level; - - /* Get the right PWM chip for DSI backlight according to VBT */ -- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { -+ if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { - panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight"); - desc = "PMIC"; - } else { -@@ -1512,11 +1513,11 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, - - drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", - NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, -- get_vbt_pwm_freq(dev_priv), level); -+ get_vbt_pwm_freq(connector), level); - } else { - /* Set period from VBT frequency, leave other settings at 0. 
*/ - panel->backlight.pwm_state.period = -- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv); -+ NSEC_PER_SEC / get_vbt_pwm_freq(connector); - } - - drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n", -@@ -1601,7 +1602,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) - struct intel_panel *panel = &connector->panel; - int ret; - -- if (!dev_priv->vbt.backlight.present) { -+ if (!connector->panel.vbt.backlight.present) { - if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { - drm_dbg_kms(&dev_priv->drm, - "no backlight present per VBT, but present per quirk\n"); -diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c -index 91caf4523b34d..b5de61fe9cc67 100644 ---- a/drivers/gpu/drm/i915/display/intel_bios.c -+++ b/drivers/gpu/drm/i915/display/intel_bios.c -@@ -682,7 +682,8 @@ static int get_panel_type(struct drm_i915_private *i915) - - /* Parse general panel options */ - static void --parse_panel_options(struct drm_i915_private *i915) -+parse_panel_options(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_lvds_options *lvds_options; - int panel_type; -@@ -692,11 +693,11 @@ parse_panel_options(struct drm_i915_private *i915) - if (!lvds_options) - return; - -- i915->vbt.lvds_dither = lvds_options->pixel_dither; -+ panel->vbt.lvds_dither = lvds_options->pixel_dither; - - panel_type = get_panel_type(i915); - -- i915->vbt.panel_type = panel_type; -+ panel->vbt.panel_type = panel_type; - - drrs_mode = (lvds_options->dps_panel_type_bits - >> (panel_type * 2)) & MODE_MASK; -@@ -707,16 +708,16 @@ parse_panel_options(struct drm_i915_private *i915) - */ - switch (drrs_mode) { - case 0: -- i915->vbt.drrs_type = DRRS_TYPE_STATIC; -+ panel->vbt.drrs_type = DRRS_TYPE_STATIC; - drm_dbg_kms(&i915->drm, "DRRS supported mode is static\n"); - break; - case 2: -- i915->vbt.drrs_type = DRRS_TYPE_SEAMLESS; -+ panel->vbt.drrs_type = DRRS_TYPE_SEAMLESS; - drm_dbg_kms(&i915->drm, - "DRRS supported mode is seamless\n"); - break; - default: -- i915->vbt.drrs_type = DRRS_TYPE_NONE; -+ panel->vbt.drrs_type = DRRS_TYPE_NONE; - drm_dbg_kms(&i915->drm, - "DRRS not supported (VBT input)\n"); - break; -@@ -725,13 +726,14 @@ parse_panel_options(struct drm_i915_private *i915) - - static void - parse_lfp_panel_dtd(struct drm_i915_private *i915, -+ struct intel_panel *panel, - const struct bdb_lvds_lfp_data *lvds_lfp_data, - const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs) - { - const struct lvds_dvo_timing *panel_dvo_timing; - const struct lvds_fp_timing *fp_timing; - struct drm_display_mode *panel_fixed_mode; -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - - panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, - lvds_lfp_data_ptrs, -@@ -743,7 +745,7 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915, - - fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); - -- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; -+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; - - drm_dbg_kms(&i915->drm, - "Found panel mode in BIOS VBT legacy lfp table: " DRM_MODE_FMT "\n", -@@ -756,20 +758,21 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915, - /* check the resolution, just to be sure */ - if (fp_timing->x_res == panel_fixed_mode->hdisplay && - fp_timing->y_res == panel_fixed_mode->vdisplay) { -- i915->vbt.bios_lvds_val = fp_timing->lvds_reg_val; -+ panel->vbt.bios_lvds_val = fp_timing->lvds_reg_val; - drm_dbg_kms(&i915->drm, - "VBT initial LVDS value %x\n", -- 
i915->vbt.bios_lvds_val); -+ panel->vbt.bios_lvds_val); - } - } - - static void --parse_lfp_data(struct drm_i915_private *i915) -+parse_lfp_data(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_lvds_lfp_data *data; - const struct bdb_lvds_lfp_data_tail *tail; - const struct bdb_lvds_lfp_data_ptrs *ptrs; -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); - if (!ptrs) -@@ -779,24 +782,25 @@ parse_lfp_data(struct drm_i915_private *i915) - if (!data) - return; - -- if (!i915->vbt.lfp_lvds_vbt_mode) -- parse_lfp_panel_dtd(i915, data, ptrs); -+ if (!panel->vbt.lfp_lvds_vbt_mode) -+ parse_lfp_panel_dtd(i915, panel, data, ptrs); - - tail = get_lfp_data_tail(data, ptrs); - if (!tail) - return; - - if (i915->vbt.version >= 188) { -- i915->vbt.seamless_drrs_min_refresh_rate = -+ panel->vbt.seamless_drrs_min_refresh_rate = - tail->seamless_drrs_min_refresh_rate[panel_type]; - drm_dbg_kms(&i915->drm, - "Seamless DRRS min refresh rate: %d Hz\n", -- i915->vbt.seamless_drrs_min_refresh_rate); -+ panel->vbt.seamless_drrs_min_refresh_rate); - } - } - - static void --parse_generic_dtd(struct drm_i915_private *i915) -+parse_generic_dtd(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_generic_dtd *generic_dtd; - const struct generic_dtd_entry *dtd; -@@ -831,14 +835,14 @@ parse_generic_dtd(struct drm_i915_private *i915) - - num_dtd = (get_blocksize(generic_dtd) - - sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size; -- if (i915->vbt.panel_type >= num_dtd) { -+ if (panel->vbt.panel_type >= num_dtd) { - drm_err(&i915->drm, - "Panel type %d not found in table of %d DTD's\n", -- i915->vbt.panel_type, num_dtd); -+ panel->vbt.panel_type, num_dtd); - return; - } - -- dtd = &generic_dtd->dtd[i915->vbt.panel_type]; -+ dtd = &generic_dtd->dtd[panel->vbt.panel_type]; - - panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); - if (!panel_fixed_mode) -@@ -881,15 +885,16 @@ parse_generic_dtd(struct drm_i915_private *i915) - "Found panel mode in BIOS VBT generic dtd table: " DRM_MODE_FMT "\n", - DRM_MODE_ARG(panel_fixed_mode)); - -- i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; -+ panel->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; - } - - static void --parse_lfp_backlight(struct drm_i915_private *i915) -+parse_lfp_backlight(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_lfp_backlight_data *backlight_data; - const struct lfp_backlight_data_entry *entry; -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - u16 level; - - backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT); -@@ -905,15 +910,15 @@ parse_lfp_backlight(struct drm_i915_private *i915) - - entry = &backlight_data->data[panel_type]; - -- i915->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; -- if (!i915->vbt.backlight.present) { -+ panel->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; -+ if (!panel->vbt.backlight.present) { - drm_dbg_kms(&i915->drm, - "PWM backlight not present in VBT (type %u)\n", - entry->type); - return; - } - -- i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; -+ panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; - if (i915->vbt.version >= 191) { - size_t exp_size; - -@@ -928,13 +933,13 @@ parse_lfp_backlight(struct drm_i915_private *i915) - const struct lfp_backlight_control_method *method; - - method = &backlight_data->backlight_control[panel_type]; -- 
i915->vbt.backlight.type = method->type; -- i915->vbt.backlight.controller = method->controller; -+ panel->vbt.backlight.type = method->type; -+ panel->vbt.backlight.controller = method->controller; - } - } - -- i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; -- i915->vbt.backlight.active_low_pwm = entry->active_low_pwm; -+ panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; -+ panel->vbt.backlight.active_low_pwm = entry->active_low_pwm; - - if (i915->vbt.version >= 234) { - u16 min_level; -@@ -955,28 +960,29 @@ parse_lfp_backlight(struct drm_i915_private *i915) - drm_warn(&i915->drm, "Brightness min level > 255\n"); - level = 255; - } -- i915->vbt.backlight.min_brightness = min_level; -+ panel->vbt.backlight.min_brightness = min_level; - -- i915->vbt.backlight.brightness_precision_bits = -+ panel->vbt.backlight.brightness_precision_bits = - backlight_data->brightness_precision_bits[panel_type]; - } else { - level = backlight_data->level[panel_type]; -- i915->vbt.backlight.min_brightness = entry->min_brightness; -+ panel->vbt.backlight.min_brightness = entry->min_brightness; - } - - drm_dbg_kms(&i915->drm, - "VBT backlight PWM modulation frequency %u Hz, " - "active %s, min brightness %u, level %u, controller %u\n", -- i915->vbt.backlight.pwm_freq_hz, -- i915->vbt.backlight.active_low_pwm ? "low" : "high", -- i915->vbt.backlight.min_brightness, -+ panel->vbt.backlight.pwm_freq_hz, -+ panel->vbt.backlight.active_low_pwm ? "low" : "high", -+ panel->vbt.backlight.min_brightness, - level, -- i915->vbt.backlight.controller); -+ panel->vbt.backlight.controller); - } - - /* Try to find sdvo panel data */ - static void --parse_sdvo_panel_data(struct drm_i915_private *i915) -+parse_sdvo_panel_data(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_sdvo_panel_dtds *dtds; - struct drm_display_mode *panel_fixed_mode; -@@ -1009,7 +1015,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915) - - fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]); - -- i915->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; -+ panel->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; - - drm_dbg_kms(&i915->drm, - "Found SDVO panel mode in BIOS VBT tables: " DRM_MODE_FMT "\n", -@@ -1188,6 +1194,17 @@ parse_driver_features(struct drm_i915_private *i915) - driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS) - i915->vbt.int_lvds_support = 0; - } -+} -+ -+static void -+parse_panel_driver_features(struct drm_i915_private *i915, -+ struct intel_panel *panel) -+{ -+ const struct bdb_driver_features *driver; -+ -+ driver = find_section(i915, BDB_DRIVER_FEATURES); -+ if (!driver) -+ return; - - if (i915->vbt.version < 228) { - drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n", -@@ -1199,17 +1216,18 @@ parse_driver_features(struct drm_i915_private *i915) - * driver->drrs_enabled=false - */ - if (!driver->drrs_enabled) -- i915->vbt.drrs_type = DRRS_TYPE_NONE; -+ panel->vbt.drrs_type = DRRS_TYPE_NONE; - -- i915->vbt.psr.enable = driver->psr_enabled; -+ panel->vbt.psr.enable = driver->psr_enabled; - } - } - - static void --parse_power_conservation_features(struct drm_i915_private *i915) -+parse_power_conservation_features(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_lfp_power *power; -- u8 panel_type = i915->vbt.panel_type; -+ u8 panel_type = panel->vbt.panel_type; - - if (i915->vbt.version < 228) - return; -@@ -1218,7 +1236,7 @@ parse_power_conservation_features(struct drm_i915_private *i915) - if (!power) - return; - -- i915->vbt.psr.enable = 
power->psr & BIT(panel_type); -+ panel->vbt.psr.enable = power->psr & BIT(panel_type); - - /* - * If DRRS is not supported, drrs_type has to be set to 0. -@@ -1227,19 +1245,20 @@ parse_power_conservation_features(struct drm_i915_private *i915) - * power->drrs & BIT(panel_type)=false - */ - if (!(power->drrs & BIT(panel_type))) -- i915->vbt.drrs_type = DRRS_TYPE_NONE; -+ panel->vbt.drrs_type = DRRS_TYPE_NONE; - - if (i915->vbt.version >= 232) -- i915->vbt.edp.hobl = power->hobl & BIT(panel_type); -+ panel->vbt.edp.hobl = power->hobl & BIT(panel_type); - } - - static void --parse_edp(struct drm_i915_private *i915) -+parse_edp(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_edp *edp; - const struct edp_power_seq *edp_pps; - const struct edp_fast_link_params *edp_link_params; -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - - edp = find_section(i915, BDB_EDP); - if (!edp) -@@ -1247,13 +1266,13 @@ parse_edp(struct drm_i915_private *i915) - - switch ((edp->color_depth >> (panel_type * 2)) & 3) { - case EDP_18BPP: -- i915->vbt.edp.bpp = 18; -+ panel->vbt.edp.bpp = 18; - break; - case EDP_24BPP: -- i915->vbt.edp.bpp = 24; -+ panel->vbt.edp.bpp = 24; - break; - case EDP_30BPP: -- i915->vbt.edp.bpp = 30; -+ panel->vbt.edp.bpp = 30; - break; - } - -@@ -1261,14 +1280,14 @@ parse_edp(struct drm_i915_private *i915) - edp_pps = &edp->power_seqs[panel_type]; - edp_link_params = &edp->fast_link_params[panel_type]; - -- i915->vbt.edp.pps = *edp_pps; -+ panel->vbt.edp.pps = *edp_pps; - - switch (edp_link_params->rate) { - case EDP_RATE_1_62: -- i915->vbt.edp.rate = DP_LINK_BW_1_62; -+ panel->vbt.edp.rate = DP_LINK_BW_1_62; - break; - case EDP_RATE_2_7: -- i915->vbt.edp.rate = DP_LINK_BW_2_7; -+ panel->vbt.edp.rate = DP_LINK_BW_2_7; - break; - default: - drm_dbg_kms(&i915->drm, -@@ -1279,13 +1298,13 @@ parse_edp(struct drm_i915_private *i915) - - switch (edp_link_params->lanes) { - case EDP_LANE_1: -- i915->vbt.edp.lanes = 1; -+ panel->vbt.edp.lanes = 1; - break; - case EDP_LANE_2: -- i915->vbt.edp.lanes = 2; -+ panel->vbt.edp.lanes = 2; - break; - case EDP_LANE_4: -- i915->vbt.edp.lanes = 4; -+ panel->vbt.edp.lanes = 4; - break; - default: - drm_dbg_kms(&i915->drm, -@@ -1296,16 +1315,16 @@ parse_edp(struct drm_i915_private *i915) - - switch (edp_link_params->preemphasis) { - case EDP_PREEMPHASIS_NONE: -- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0; -+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0; - break; - case EDP_PREEMPHASIS_3_5dB: -- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1; -+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1; - break; - case EDP_PREEMPHASIS_6dB: -- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2; -+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2; - break; - case EDP_PREEMPHASIS_9_5dB: -- i915->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; -+ panel->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; - break; - default: - drm_dbg_kms(&i915->drm, -@@ -1316,16 +1335,16 @@ parse_edp(struct drm_i915_private *i915) - - switch (edp_link_params->vswing) { - case EDP_VSWING_0_4V: -- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0; -+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0; - break; - case EDP_VSWING_0_6V: -- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1; -+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1; - break; - case EDP_VSWING_0_8V: -- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2; -+ panel->vbt.edp.vswing = 
DP_TRAIN_VOLTAGE_SWING_LEVEL_2; - break; - case EDP_VSWING_1_2V: -- i915->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; -+ panel->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; - break; - default: - drm_dbg_kms(&i915->drm, -@@ -1339,24 +1358,25 @@ parse_edp(struct drm_i915_private *i915) - - /* Don't read from VBT if module parameter has valid value*/ - if (i915->params.edp_vswing) { -- i915->vbt.edp.low_vswing = -+ panel->vbt.edp.low_vswing = - i915->params.edp_vswing == 1; - } else { - vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; -- i915->vbt.edp.low_vswing = vswing == 0; -+ panel->vbt.edp.low_vswing = vswing == 0; - } - } - -- i915->vbt.edp.drrs_msa_timing_delay = -+ panel->vbt.edp.drrs_msa_timing_delay = - (edp->sdrrs_msa_timing_delay >> (panel_type * 2)) & 3; - } - - static void --parse_psr(struct drm_i915_private *i915) -+parse_psr(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_psr *psr; - const struct psr_table *psr_table; -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - - psr = find_section(i915, BDB_PSR); - if (!psr) { -@@ -1366,11 +1386,11 @@ parse_psr(struct drm_i915_private *i915) - - psr_table = &psr->psr_table[panel_type]; - -- i915->vbt.psr.full_link = psr_table->full_link; -- i915->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup; -+ panel->vbt.psr.full_link = psr_table->full_link; -+ panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup; - - /* Allowed VBT values goes from 0 to 15 */ -- i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 : -+ panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 : - psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames; - - /* -@@ -1381,13 +1401,13 @@ parse_psr(struct drm_i915_private *i915) - (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) { - switch (psr_table->tp1_wakeup_time) { - case 0: -- i915->vbt.psr.tp1_wakeup_time_us = 500; -+ panel->vbt.psr.tp1_wakeup_time_us = 500; - break; - case 1: -- i915->vbt.psr.tp1_wakeup_time_us = 100; -+ panel->vbt.psr.tp1_wakeup_time_us = 100; - break; - case 3: -- i915->vbt.psr.tp1_wakeup_time_us = 0; -+ panel->vbt.psr.tp1_wakeup_time_us = 0; - break; - default: - drm_dbg_kms(&i915->drm, -@@ -1395,19 +1415,19 @@ parse_psr(struct drm_i915_private *i915) - psr_table->tp1_wakeup_time); - fallthrough; - case 2: -- i915->vbt.psr.tp1_wakeup_time_us = 2500; -+ panel->vbt.psr.tp1_wakeup_time_us = 2500; - break; - } - - switch (psr_table->tp2_tp3_wakeup_time) { - case 0: -- i915->vbt.psr.tp2_tp3_wakeup_time_us = 500; -+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 500; - break; - case 1: -- i915->vbt.psr.tp2_tp3_wakeup_time_us = 100; -+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 100; - break; - case 3: -- i915->vbt.psr.tp2_tp3_wakeup_time_us = 0; -+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 0; - break; - default: - drm_dbg_kms(&i915->drm, -@@ -1415,12 +1435,12 @@ parse_psr(struct drm_i915_private *i915) - psr_table->tp2_tp3_wakeup_time); - fallthrough; - case 2: -- i915->vbt.psr.tp2_tp3_wakeup_time_us = 2500; -+ panel->vbt.psr.tp2_tp3_wakeup_time_us = 2500; - break; - } - } else { -- i915->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100; -- i915->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; -+ panel->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100; -+ panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; - } - - if (i915->vbt.version >= 226) { -@@ -1442,62 +1462,66 @@ parse_psr(struct drm_i915_private 
*i915) - wakeup_time = 2500; - break; - } -- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time; -+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time; - } else { - /* Reusing PSR1 wakeup time for PSR2 in older VBTs */ -- i915->vbt.psr.psr2_tp2_tp3_wakeup_time_us = i915->vbt.psr.tp2_tp3_wakeup_time_us; -+ panel->vbt.psr.psr2_tp2_tp3_wakeup_time_us = panel->vbt.psr.tp2_tp3_wakeup_time_us; - } - } - - static void parse_dsi_backlight_ports(struct drm_i915_private *i915, -- u16 version, enum port port) -+ struct intel_panel *panel, -+ enum port port) - { -- if (!i915->vbt.dsi.config->dual_link || version < 197) { -- i915->vbt.dsi.bl_ports = BIT(port); -- if (i915->vbt.dsi.config->cabc_supported) -- i915->vbt.dsi.cabc_ports = BIT(port); -+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C; -+ -+ if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) { -+ panel->vbt.dsi.bl_ports = BIT(port); -+ if (panel->vbt.dsi.config->cabc_supported) -+ panel->vbt.dsi.cabc_ports = BIT(port); - - return; - } - -- switch (i915->vbt.dsi.config->dl_dcs_backlight_ports) { -+ switch (panel->vbt.dsi.config->dl_dcs_backlight_ports) { - case DL_DCS_PORT_A: -- i915->vbt.dsi.bl_ports = BIT(PORT_A); -+ panel->vbt.dsi.bl_ports = BIT(PORT_A); - break; - case DL_DCS_PORT_C: -- i915->vbt.dsi.bl_ports = BIT(PORT_C); -+ panel->vbt.dsi.bl_ports = BIT(port_bc); - break; - default: - case DL_DCS_PORT_A_AND_C: -- i915->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C); -+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc); - break; - } - -- if (!i915->vbt.dsi.config->cabc_supported) -+ if (!panel->vbt.dsi.config->cabc_supported) - return; - -- switch (i915->vbt.dsi.config->dl_dcs_cabc_ports) { -+ switch (panel->vbt.dsi.config->dl_dcs_cabc_ports) { - case DL_DCS_PORT_A: -- i915->vbt.dsi.cabc_ports = BIT(PORT_A); -+ panel->vbt.dsi.cabc_ports = BIT(PORT_A); - break; - case DL_DCS_PORT_C: -- i915->vbt.dsi.cabc_ports = BIT(PORT_C); -+ panel->vbt.dsi.cabc_ports = BIT(port_bc); - break; - default: - case DL_DCS_PORT_A_AND_C: -- i915->vbt.dsi.cabc_ports = -- BIT(PORT_A) | BIT(PORT_C); -+ panel->vbt.dsi.cabc_ports = -+ BIT(PORT_A) | BIT(port_bc); - break; - } - } - - static void --parse_mipi_config(struct drm_i915_private *i915) -+parse_mipi_config(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - const struct bdb_mipi_config *start; - const struct mipi_config *config; - const struct mipi_pps_data *pps; -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - enum port port; - - /* parse MIPI blocks only if LFP type is MIPI */ -@@ -1505,7 +1529,7 @@ parse_mipi_config(struct drm_i915_private *i915) - return; - - /* Initialize this to undefined indicating no generic MIPI support */ -- i915->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID; -+ panel->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID; - - /* Block #40 is already parsed and panel_fixed_mode is - * stored in i915->lfp_lvds_vbt_mode -@@ -1532,17 +1556,17 @@ parse_mipi_config(struct drm_i915_private *i915) - pps = &start->pps[panel_type]; - - /* store as of now full data. 
Trim when we realise all is not needed */ -- i915->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL); -- if (!i915->vbt.dsi.config) -+ panel->vbt.dsi.config = kmemdup(config, sizeof(struct mipi_config), GFP_KERNEL); -+ if (!panel->vbt.dsi.config) - return; - -- i915->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL); -- if (!i915->vbt.dsi.pps) { -- kfree(i915->vbt.dsi.config); -+ panel->vbt.dsi.pps = kmemdup(pps, sizeof(struct mipi_pps_data), GFP_KERNEL); -+ if (!panel->vbt.dsi.pps) { -+ kfree(panel->vbt.dsi.config); - return; - } - -- parse_dsi_backlight_ports(i915, i915->vbt.version, port); -+ parse_dsi_backlight_ports(i915, panel, port); - - /* FIXME is the 90 vs. 270 correct? */ - switch (config->rotation) { -@@ -1551,25 +1575,25 @@ parse_mipi_config(struct drm_i915_private *i915) - * Most (all?) VBTs claim 0 degrees despite having - * an upside down panel, thus we do not trust this. - */ -- i915->vbt.dsi.orientation = -+ panel->vbt.dsi.orientation = - DRM_MODE_PANEL_ORIENTATION_UNKNOWN; - break; - case ENABLE_ROTATION_90: -- i915->vbt.dsi.orientation = -+ panel->vbt.dsi.orientation = - DRM_MODE_PANEL_ORIENTATION_RIGHT_UP; - break; - case ENABLE_ROTATION_180: -- i915->vbt.dsi.orientation = -+ panel->vbt.dsi.orientation = - DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; - break; - case ENABLE_ROTATION_270: -- i915->vbt.dsi.orientation = -+ panel->vbt.dsi.orientation = - DRM_MODE_PANEL_ORIENTATION_LEFT_UP; - break; - } - - /* We have mandatory mipi config blocks. Initialize as generic panel */ -- i915->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; -+ panel->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; - } - - /* Find the sequence block and size for the given panel. */ -@@ -1732,13 +1756,14 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) - * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, - * skip all delay + gpio operands and stop at the first DSI packet op. - */ --static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915) -+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { -- const u8 *data = i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; -+ const u8 *data = panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; - int index, len; - - if (drm_WARN_ON(&i915->drm, -- !data || i915->vbt.dsi.seq_version != 1)) -+ !data || panel->vbt.dsi.seq_version != 1)) - return 0; - - /* index = 1 to skip sequence byte */ -@@ -1766,7 +1791,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915) - * these devices we split the init OTP sequence into a deassert sequence and - * the actual init OTP part. 
- */ --static void fixup_mipi_sequences(struct drm_i915_private *i915) -+static void fixup_mipi_sequences(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { - u8 *init_otp; - int len; -@@ -1776,18 +1802,18 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915) - return; - - /* Limit this to v1 vid-mode sequences */ -- if (i915->vbt.dsi.config->is_cmd_mode || -- i915->vbt.dsi.seq_version != 1) -+ if (panel->vbt.dsi.config->is_cmd_mode || -+ panel->vbt.dsi.seq_version != 1) - return; - - /* Only do this if there are otp and assert seqs and no deassert seq */ -- if (!i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] || -- !i915->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] || -- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) -+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] || -+ !panel->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] || -+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) - return; - - /* The deassert-sequence ends at the first DSI packet */ -- len = get_init_otp_deassert_fragment_len(i915); -+ len = get_init_otp_deassert_fragment_len(i915, panel); - if (!len) - return; - -@@ -1795,25 +1821,26 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915) - "Using init OTP fragment to deassert reset\n"); - - /* Copy the fragment, update seq byte and terminate it */ -- init_otp = (u8 *)i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; -- i915->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL); -- if (!i915->vbt.dsi.deassert_seq) -+ init_otp = (u8 *)panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; -+ panel->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL); -+ if (!panel->vbt.dsi.deassert_seq) - return; -- i915->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET; -- i915->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END; -+ panel->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET; -+ panel->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END; - /* Use the copy for deassert */ -- i915->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] = -- i915->vbt.dsi.deassert_seq; -+ panel->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] = -+ panel->vbt.dsi.deassert_seq; - /* Replace the last byte of the fragment with init OTP seq byte */ - init_otp[len - 1] = MIPI_SEQ_INIT_OTP; - /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */ -- i915->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1; -+ panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1; - } - - static void --parse_mipi_sequence(struct drm_i915_private *i915) -+parse_mipi_sequence(struct drm_i915_private *i915, -+ struct intel_panel *panel) - { -- int panel_type = i915->vbt.panel_type; -+ int panel_type = panel->vbt.panel_type; - const struct bdb_mipi_sequence *sequence; - const u8 *seq_data; - u32 seq_size; -@@ -1821,7 +1848,7 @@ parse_mipi_sequence(struct drm_i915_private *i915) - int index = 0; - - /* Only our generic panel driver uses the sequence block. 
*/ -- if (i915->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) -+ if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) - return; - - sequence = find_section(i915, BDB_MIPI_SEQUENCE); -@@ -1867,7 +1894,7 @@ parse_mipi_sequence(struct drm_i915_private *i915) - drm_dbg_kms(&i915->drm, - "Unsupported sequence %u\n", seq_id); - -- i915->vbt.dsi.sequence[seq_id] = data + index; -+ panel->vbt.dsi.sequence[seq_id] = data + index; - - if (sequence->version >= 3) - index = goto_next_sequence_v3(data, index, seq_size); -@@ -1880,18 +1907,18 @@ parse_mipi_sequence(struct drm_i915_private *i915) - } - } - -- i915->vbt.dsi.data = data; -- i915->vbt.dsi.size = seq_size; -- i915->vbt.dsi.seq_version = sequence->version; -+ panel->vbt.dsi.data = data; -+ panel->vbt.dsi.size = seq_size; -+ panel->vbt.dsi.seq_version = sequence->version; - -- fixup_mipi_sequences(i915); -+ fixup_mipi_sequences(i915, panel); - - drm_dbg(&i915->drm, "MIPI related VBT parsing complete\n"); - return; - - err: - kfree(data); -- memset(i915->vbt.dsi.sequence, 0, sizeof(i915->vbt.dsi.sequence)); -+ memset(panel->vbt.dsi.sequence, 0, sizeof(panel->vbt.dsi.sequence)); - } - - static void -@@ -2645,15 +2672,6 @@ init_vbt_defaults(struct drm_i915_private *i915) - { - i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; - -- /* Default to having backlight */ -- i915->vbt.backlight.present = true; -- -- /* LFP panel data */ -- i915->vbt.lvds_dither = 1; -- -- /* SDVO panel data */ -- i915->vbt.sdvo_lvds_vbt_mode = NULL; -- - /* general features */ - i915->vbt.int_tv_support = 1; - i915->vbt.int_crt_support = 1; -@@ -2673,6 +2691,17 @@ init_vbt_defaults(struct drm_i915_private *i915) - i915->vbt.lvds_ssc_freq); - } - -+/* Common defaults which may be overridden by VBT. */ -+static void -+init_vbt_panel_defaults(struct intel_panel *panel) -+{ -+ /* Default to having backlight */ -+ panel->vbt.backlight.present = true; -+ -+ /* LFP panel data */ -+ panel->vbt.lvds_dither = true; -+} -+ - /* Defaults to initialize only if there is no VBT. 
*/ - static void - init_vbt_missing_defaults(struct drm_i915_private *i915) -@@ -2959,17 +2988,7 @@ void intel_bios_init(struct drm_i915_private *i915) - /* Grab useful general definitions */ - parse_general_features(i915); - parse_general_definitions(i915); -- parse_panel_options(i915); -- parse_generic_dtd(i915); -- parse_lfp_data(i915); -- parse_lfp_backlight(i915); -- parse_sdvo_panel_data(i915); - parse_driver_features(i915); -- parse_power_conservation_features(i915); -- parse_edp(i915); -- parse_psr(i915); -- parse_mipi_config(i915); -- parse_mipi_sequence(i915); - - /* Depends on child device list */ - parse_compression_parameters(i915); -@@ -2988,6 +3007,24 @@ out: - kfree(oprom_vbt); - } - -+void intel_bios_init_panel(struct drm_i915_private *i915, -+ struct intel_panel *panel) -+{ -+ init_vbt_panel_defaults(panel); -+ -+ parse_panel_options(i915, panel); -+ parse_generic_dtd(i915, panel); -+ parse_lfp_data(i915, panel); -+ parse_lfp_backlight(i915, panel); -+ parse_sdvo_panel_data(i915, panel); -+ parse_panel_driver_features(i915, panel); -+ parse_power_conservation_features(i915, panel); -+ parse_edp(i915, panel); -+ parse_psr(i915, panel); -+ parse_mipi_config(i915, panel); -+ parse_mipi_sequence(i915, panel); -+} -+ - /** - * intel_bios_driver_remove - Free any resources allocated by intel_bios_init() - * @i915: i915 device instance -@@ -3007,19 +3044,22 @@ void intel_bios_driver_remove(struct drm_i915_private *i915) - list_del(&entry->node); - kfree(entry); - } -+} - -- kfree(i915->vbt.sdvo_lvds_vbt_mode); -- i915->vbt.sdvo_lvds_vbt_mode = NULL; -- kfree(i915->vbt.lfp_lvds_vbt_mode); -- i915->vbt.lfp_lvds_vbt_mode = NULL; -- kfree(i915->vbt.dsi.data); -- i915->vbt.dsi.data = NULL; -- kfree(i915->vbt.dsi.pps); -- i915->vbt.dsi.pps = NULL; -- kfree(i915->vbt.dsi.config); -- i915->vbt.dsi.config = NULL; -- kfree(i915->vbt.dsi.deassert_seq); -- i915->vbt.dsi.deassert_seq = NULL; -+void intel_bios_fini_panel(struct intel_panel *panel) -+{ -+ kfree(panel->vbt.sdvo_lvds_vbt_mode); -+ panel->vbt.sdvo_lvds_vbt_mode = NULL; -+ kfree(panel->vbt.lfp_lvds_vbt_mode); -+ panel->vbt.lfp_lvds_vbt_mode = NULL; -+ kfree(panel->vbt.dsi.data); -+ panel->vbt.dsi.data = NULL; -+ kfree(panel->vbt.dsi.pps); -+ panel->vbt.dsi.pps = NULL; -+ kfree(panel->vbt.dsi.config); -+ panel->vbt.dsi.config = NULL; -+ kfree(panel->vbt.dsi.deassert_seq); -+ panel->vbt.dsi.deassert_seq = NULL; - } - - /** -diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h -index 4709c4d298059..86129f015718d 100644 ---- a/drivers/gpu/drm/i915/display/intel_bios.h -+++ b/drivers/gpu/drm/i915/display/intel_bios.h -@@ -36,6 +36,7 @@ struct drm_i915_private; - struct intel_bios_encoder_data; - struct intel_crtc_state; - struct intel_encoder; -+struct intel_panel; - enum port; - - enum intel_backlight_type { -@@ -230,6 +231,9 @@ struct mipi_pps_data { - } __packed; - - void intel_bios_init(struct drm_i915_private *dev_priv); -+void intel_bios_init_panel(struct drm_i915_private *dev_priv, -+ struct intel_panel *panel); -+void intel_bios_fini_panel(struct intel_panel *panel); - void intel_bios_driver_remove(struct drm_i915_private *dev_priv); - bool intel_bios_is_valid_vbt(const void *buf, size_t size); - bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); -diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c -index 9e6fa59eabba7..333871cf3a2c5 100644 ---- a/drivers/gpu/drm/i915/display/intel_ddi.c -+++ 
b/drivers/gpu/drm/i915/display/intel_ddi.c -@@ -3433,26 +3433,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, - pipe_config->has_audio = - intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); - -- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && -- pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { -- /* -- * This is a big fat ugly hack. -- * -- * Some machines in UEFI boot mode provide us a VBT that has 18 -- * bpp and 1.62 GHz link bandwidth for eDP, which for reasons -- * unknown we fail to light up. Yet the same BIOS boots up with -- * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as -- * max, not what it tells us to use. -- * -- * Note: This will still be broken if the eDP panel is not lit -- * up by the BIOS, and thus we can't get the mode at module -- * load. -- */ -- drm_dbg_kms(&dev_priv->drm, -- "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", -- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); -- dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; -- } -+ if (encoder->type == INTEL_OUTPUT_EDP) -+ intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp); - - ddi_dotclock_get(pipe_config); - -diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c -index 85f58dd3df722..b490acd0ab691 100644 ---- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c -+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c -@@ -1062,17 +1062,18 @@ bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) - - static bool use_edp_hobl(struct intel_encoder *encoder) - { -- struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); -+ struct intel_connector *connector = intel_dp->attached_connector; - -- return i915->vbt.edp.hobl && !intel_dp->hobl_failed; -+ return connector->panel.vbt.edp.hobl && !intel_dp->hobl_failed; - } - - static bool use_edp_low_vswing(struct intel_encoder *encoder) - { -- struct drm_i915_private *i915 = to_i915(encoder->base.dev); -+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder); -+ struct intel_connector *connector = intel_dp->attached_connector; - -- return i915->vbt.edp.low_vswing; -+ return connector->panel.vbt.edp.low_vswing; - } - - static const struct intel_ddi_buf_trans * -diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h -index 408152f9f46a4..e2561c5d4953c 100644 ---- a/drivers/gpu/drm/i915/display/intel_display_types.h -+++ b/drivers/gpu/drm/i915/display/intel_display_types.h -@@ -279,6 +279,73 @@ struct intel_panel_bl_funcs { - u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); - }; - -+enum drrs_type { -+ DRRS_TYPE_NONE, -+ DRRS_TYPE_STATIC, -+ DRRS_TYPE_SEAMLESS, -+}; -+ -+struct intel_vbt_panel_data { -+ struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ -+ struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ -+ -+ /* Feature bits */ -+ unsigned int panel_type:4; -+ unsigned int lvds_dither:1; -+ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ -+ -+ u8 seamless_drrs_min_refresh_rate; -+ enum drrs_type drrs_type; -+ -+ struct { -+ int rate; -+ int lanes; -+ int preemphasis; -+ int vswing; -+ int bpp; -+ struct edp_power_seq pps; -+ u8 drrs_msa_timing_delay; -+ bool low_vswing; -+ bool initialized; -+ bool hobl; -+ } edp; -+ -+ struct { -+ bool enable; -+ bool full_link; -+ bool require_aux_wakeup; -+ int idle_frames; -+ int tp1_wakeup_time_us; -+ int 
tp2_tp3_wakeup_time_us; -+ int psr2_tp2_tp3_wakeup_time_us; -+ } psr; -+ -+ struct { -+ u16 pwm_freq_hz; -+ u16 brightness_precision_bits; -+ bool present; -+ bool active_low_pwm; -+ u8 min_brightness; /* min_brightness/255 of max */ -+ u8 controller; /* brightness controller number */ -+ enum intel_backlight_type type; -+ } backlight; -+ -+ /* MIPI DSI */ -+ struct { -+ u16 panel_id; -+ struct mipi_config *config; -+ struct mipi_pps_data *pps; -+ u16 bl_ports; -+ u16 cabc_ports; -+ u8 seq_version; -+ u32 size; -+ u8 *data; -+ const u8 *sequence[MIPI_SEQ_MAX]; -+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ -+ enum drm_panel_orientation orientation; -+ } dsi; -+}; -+ - struct intel_panel { - struct list_head fixed_modes; - -@@ -318,6 +385,8 @@ struct intel_panel { - const struct intel_panel_bl_funcs *pwm_funcs; - void (*power)(struct intel_connector *, bool enable); - } backlight; -+ -+ struct intel_vbt_panel_data vbt; - }; - - struct intel_digital_port; -diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c -index fe8b6b72970a2..0efec6023fbe8 100644 ---- a/drivers/gpu/drm/i915/display/intel_dp.c -+++ b/drivers/gpu/drm/i915/display/intel_dp.c -@@ -1246,11 +1246,12 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp, - if (intel_dp_is_edp(intel_dp)) { - /* Get bpp from vbt only for panels that dont have bpp in edid */ - if (intel_connector->base.display_info.bpc == 0 && -- dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { -+ intel_connector->panel.vbt.edp.bpp && -+ intel_connector->panel.vbt.edp.bpp < bpp) { - drm_dbg_kms(&dev_priv->drm, - "clamping bpp for eDP panel to BIOS-provided %i\n", -- dev_priv->vbt.edp.bpp); -- bpp = dev_priv->vbt.edp.bpp; -+ intel_connector->panel.vbt.edp.bpp); -+ bpp = intel_connector->panel.vbt.edp.bpp; - } - } - -@@ -1907,7 +1908,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector, - } - - if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) -- pipe_config->msa_timing_delay = i915->vbt.edp.drrs_msa_timing_delay; -+ pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay; - - pipe_config->has_drrs = true; - -@@ -2737,6 +2738,33 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector, - DRM_MODE_ARG(mode)); - } - -+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp) -+{ -+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); -+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder); -+ struct intel_connector *connector = intel_dp->attached_connector; -+ -+ if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) { -+ /* -+ * This is a big fat ugly hack. -+ * -+ * Some machines in UEFI boot mode provide us a VBT that has 18 -+ * bpp and 1.62 GHz link bandwidth for eDP, which for reasons -+ * unknown we fail to light up. Yet the same BIOS boots up with -+ * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as -+ * max, not what it tells us to use. -+ * -+ * Note: This will still be broken if the eDP panel is not lit -+ * up by the BIOS, and thus we can't get the mode at module -+ * load. 
-+ */ -+ drm_dbg_kms(&dev_priv->drm, -+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", -+ pipe_bpp, connector->panel.vbt.edp.bpp); -+ connector->panel.vbt.edp.bpp = pipe_bpp; -+ } -+} -+ - static void intel_edp_mso_init(struct intel_dp *intel_dp) - { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); -@@ -5212,8 +5240,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, - } - intel_connector->edid = edid; - -+ intel_bios_init_panel(dev_priv, &intel_connector->panel); -+ - intel_panel_add_edid_fixed_modes(intel_connector, -- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE); -+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE); - - /* MSO requires information from the EDID */ - intel_edp_mso_init(intel_dp); -diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h -index d457e17bdc57e..a54902c713a34 100644 ---- a/drivers/gpu/drm/i915/display/intel_dp.h -+++ b/drivers/gpu/drm/i915/display/intel_dp.h -@@ -29,6 +29,7 @@ struct link_config_limits { - int min_bpp, max_bpp; - }; - -+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp); - void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - struct link_config_limits *limits); -@@ -63,6 +64,7 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, - void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); - void intel_edp_backlight_off(const struct drm_connector_state *conn_state); -+void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp); - void intel_dp_mst_suspend(struct drm_i915_private *dev_priv); - void intel_dp_mst_resume(struct drm_i915_private *dev_priv); - int intel_dp_max_link_rate(struct intel_dp *intel_dp); -diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c -index fb6cf30ee6281..c92d5bb2326a3 100644 ---- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c -+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c -@@ -370,7 +370,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, - int ret; - - ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info, -- i915->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd, -+ panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd, - ¤t_level, ¤t_mode); - if (ret < 0) - return ret; -@@ -454,7 +454,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) - case INTEL_DP_AUX_BACKLIGHT_OFF: - return -ENODEV; - case INTEL_DP_AUX_BACKLIGHT_AUTO: -- switch (i915->vbt.backlight.type) { -+ switch (panel->vbt.backlight.type) { - case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE: - try_vesa_interface = true; - break; -@@ -466,7 +466,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) - } - break; - case INTEL_DP_AUX_BACKLIGHT_ON: -- if (i915->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE) -+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE) - try_intel_interface = true; - - try_vesa_interface = true; -diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c -index 166caf293f7bc..7da4a9cbe4ba4 100644 ---- a/drivers/gpu/drm/i915/display/intel_drrs.c -+++ b/drivers/gpu/drm/i915/display/intel_drrs.c -@@ -217,9 +217,6 @@ static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv, - { - struct 
intel_crtc *crtc; - -- if (dev_priv->vbt.drrs_type != DRRS_TYPE_SEAMLESS) -- return; -- - for_each_intel_crtc(&dev_priv->drm, crtc) { - unsigned int frontbuffer_bits; - -diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c -index 389a8c24cdc1e..35e121cd226c5 100644 ---- a/drivers/gpu/drm/i915/display/intel_dsi.c -+++ b/drivers/gpu/drm/i915/display/intel_dsi.c -@@ -102,7 +102,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector) - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum drm_panel_orientation orientation; - -- orientation = dev_priv->vbt.dsi.orientation; -+ orientation = connector->panel.vbt.dsi.orientation; - if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) - return orientation; - -diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c -index 7d234429e71ef..1bc7118c56a2a 100644 ---- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c -+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c -@@ -160,12 +160,10 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, - static int dcs_setup_backlight(struct intel_connector *connector, - enum pipe unused) - { -- struct drm_device *dev = connector->base.dev; -- struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_panel *panel = &connector->panel; - -- if (dev_priv->vbt.backlight.brightness_precision_bits > 8) -- panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1; -+ if (panel->vbt.backlight.brightness_precision_bits > 8) -+ panel->backlight.max = (1 << panel->vbt.backlight.brightness_precision_bits) - 1; - else - panel->backlight.max = PANEL_PWM_MAX_VALUE; - -@@ -185,11 +183,10 @@ static const struct intel_panel_bl_funcs dcs_bl_funcs = { - int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) - { - struct drm_device *dev = intel_connector->base.dev; -- struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_encoder *encoder = intel_attached_encoder(intel_connector); - struct intel_panel *panel = &intel_connector->panel; - -- if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) -+ if (panel->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) - return -ENODEV; - - if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI)) -diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c -index dd24aef925f2e..75e8cc4337c93 100644 ---- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c -+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c -@@ -240,9 +240,10 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) - return data; - } - --static void vlv_exec_gpio(struct drm_i915_private *dev_priv, -+static void vlv_exec_gpio(struct intel_connector *connector, - u8 gpio_source, u8 gpio_index, bool value) - { -+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct gpio_map *map; - u16 pconf0, padval; - u32 tmp; -@@ -256,7 +257,7 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, - - map = &vlv_gpio_table[gpio_index]; - -- if (dev_priv->vbt.dsi.seq_version >= 3) { -+ if (connector->panel.vbt.dsi.seq_version >= 3) { - /* XXX: this assumes vlv_gpio_table only has NC GPIOs. 
*/ - port = IOSF_PORT_GPIO_NC; - } else { -@@ -287,14 +288,15 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, - vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO)); - } - --static void chv_exec_gpio(struct drm_i915_private *dev_priv, -+static void chv_exec_gpio(struct intel_connector *connector, - u8 gpio_source, u8 gpio_index, bool value) - { -+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - u16 cfg0, cfg1; - u16 family_num; - u8 port; - -- if (dev_priv->vbt.dsi.seq_version >= 3) { -+ if (connector->panel.vbt.dsi.seq_version >= 3) { - if (gpio_index >= CHV_GPIO_IDX_START_SE) { - /* XXX: it's unclear whether 255->57 is part of SE. */ - gpio_index -= CHV_GPIO_IDX_START_SE; -@@ -340,9 +342,10 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv, - vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO)); - } - --static void bxt_exec_gpio(struct drm_i915_private *dev_priv, -+static void bxt_exec_gpio(struct intel_connector *connector, - u8 gpio_source, u8 gpio_index, bool value) - { -+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - /* XXX: this table is a quick ugly hack. */ - static struct gpio_desc *bxt_gpio_table[U8_MAX + 1]; - struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index]; -@@ -366,9 +369,11 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, - gpiod_set_value(gpio_desc, value); - } - --static void icl_exec_gpio(struct drm_i915_private *dev_priv, -+static void icl_exec_gpio(struct intel_connector *connector, - u8 gpio_source, u8 gpio_index, bool value) - { -+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev); -+ - drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n"); - } - -@@ -376,18 +381,19 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) - { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); -+ struct intel_connector *connector = intel_dsi->attached_connector; - u8 gpio_source, gpio_index = 0, gpio_number; - bool value; - - drm_dbg_kms(&dev_priv->drm, "\n"); - -- if (dev_priv->vbt.dsi.seq_version >= 3) -+ if (connector->panel.vbt.dsi.seq_version >= 3) - gpio_index = *data++; - - gpio_number = *data++; - - /* gpio source in sequence v2 only */ -- if (dev_priv->vbt.dsi.seq_version == 2) -+ if (connector->panel.vbt.dsi.seq_version == 2) - gpio_source = (*data >> 1) & 3; - else - gpio_source = 0; -@@ -396,13 +402,13 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) - value = *data++ & 1; - - if (DISPLAY_VER(dev_priv) >= 11) -- icl_exec_gpio(dev_priv, gpio_source, gpio_index, value); -+ icl_exec_gpio(connector, gpio_source, gpio_index, value); - else if (IS_VALLEYVIEW(dev_priv)) -- vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); -+ vlv_exec_gpio(connector, gpio_source, gpio_number, value); - else if (IS_CHERRYVIEW(dev_priv)) -- chv_exec_gpio(dev_priv, gpio_source, gpio_number, value); -+ chv_exec_gpio(connector, gpio_source, gpio_number, value); - else -- bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value); -+ bxt_exec_gpio(connector, gpio_source, gpio_index, value); - - return data; - } -@@ -585,14 +591,15 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, - enum mipi_seq seq_id) - { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); -+ struct intel_connector *connector = intel_dsi->attached_connector; - const u8 *data; - fn_mipi_elem_exec mipi_elem_exec; - - if (drm_WARN_ON(&dev_priv->drm, -- seq_id >= 
ARRAY_SIZE(dev_priv->vbt.dsi.sequence))) -+ seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence))) - return; - -- data = dev_priv->vbt.dsi.sequence[seq_id]; -+ data = connector->panel.vbt.dsi.sequence[seq_id]; - if (!data) - return; - -@@ -605,7 +612,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, - data++; - - /* Skip Size of Sequence. */ -- if (dev_priv->vbt.dsi.seq_version >= 3) -+ if (connector->panel.vbt.dsi.seq_version >= 3) - data += 4; - - while (1) { -@@ -621,7 +628,7 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, - mipi_elem_exec = NULL; - - /* Size of Operation. */ -- if (dev_priv->vbt.dsi.seq_version >= 3) -+ if (connector->panel.vbt.dsi.seq_version >= 3) - operation_size = *data++; - - if (mipi_elem_exec) { -@@ -669,10 +676,10 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, - - void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) - { -- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); -+ struct intel_connector *connector = intel_dsi->attached_connector; - - /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */ -- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3) -+ if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3) - return; - - msleep(msec); -@@ -734,9 +741,10 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) - { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); -- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; -- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; -- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; -+ struct intel_connector *connector = intel_dsi->attached_connector; -+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; -+ struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps; -+ struct drm_display_mode *mode = connector->panel.vbt.lfp_lvds_vbt_mode; - u16 burst_mode_ratio; - enum port port; - -@@ -872,7 +880,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) - { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); -- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; -+ struct intel_connector *connector = intel_dsi->attached_connector; -+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; - enum gpiod_flags flags = panel_is_on ? 
GPIOD_OUT_HIGH : GPIOD_OUT_LOW; - bool want_backlight_gpio = false; - bool want_panel_gpio = false; -@@ -927,7 +936,8 @@ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi) - { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); -- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; -+ struct intel_connector *connector = intel_dsi->attached_connector; -+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; - - if (intel_dsi->gpio_panel) { - gpiod_put(intel_dsi->gpio_panel); -diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c -index e8478161f8b9b..9f250a70519aa 100644 ---- a/drivers/gpu/drm/i915/display/intel_lvds.c -+++ b/drivers/gpu/drm/i915/display/intel_lvds.c -@@ -809,7 +809,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) - else - val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK); - if (val == 0) -- val = dev_priv->vbt.bios_lvds_val; -+ val = connector->panel.vbt.bios_lvds_val; - - return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; - } -@@ -967,9 +967,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) - } - intel_connector->edid = edid; - -+ intel_bios_init_panel(dev_priv, &intel_connector->panel); -+ - /* Try EDID first */ - intel_panel_add_edid_fixed_modes(intel_connector, -- dev_priv->vbt.drrs_type != DRRS_TYPE_NONE); -+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE); - - /* Failed to get EDID, what about VBT? */ - if (!intel_panel_preferred_fixed_mode(intel_connector)) -diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c -index d1d1b59102d69..d055e41185582 100644 ---- a/drivers/gpu/drm/i915/display/intel_panel.c -+++ b/drivers/gpu/drm/i915/display/intel_panel.c -@@ -75,9 +75,8 @@ const struct drm_display_mode * - intel_panel_downclock_mode(struct intel_connector *connector, - const struct drm_display_mode *adjusted_mode) - { -- struct drm_i915_private *i915 = to_i915(connector->base.dev); - const struct drm_display_mode *fixed_mode, *best_mode = NULL; -- int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate; -+ int min_vrefresh = connector->panel.vbt.seamless_drrs_min_refresh_rate; - int max_vrefresh = drm_mode_vrefresh(adjusted_mode); - - /* pick the fixed_mode with the lowest refresh rate */ -@@ -113,13 +112,11 @@ int intel_panel_get_modes(struct intel_connector *connector) - - enum drrs_type intel_panel_drrs_type(struct intel_connector *connector) - { -- struct drm_i915_private *i915 = to_i915(connector->base.dev); -- - if (list_empty(&connector->panel.fixed_modes) || - list_is_singular(&connector->panel.fixed_modes)) - return DRRS_TYPE_NONE; - -- return i915->vbt.drrs_type; -+ return connector->panel.vbt.drrs_type; - } - - int intel_panel_compute_config(struct intel_connector *connector, -@@ -260,7 +257,7 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector) - struct drm_i915_private *i915 = to_i915(connector->base.dev); - const struct drm_display_mode *mode; - -- mode = i915->vbt.lfp_lvds_vbt_mode; -+ mode = connector->panel.vbt.lfp_lvds_vbt_mode; - if (!mode) - return; - -@@ -274,7 +271,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector) - struct drm_i915_private *i915 = to_i915(connector->base.dev); - const struct drm_display_mode *mode; - -- mode = i915->vbt.sdvo_lvds_vbt_mode; -+ mode = connector->panel.vbt.sdvo_lvds_vbt_mode; - if (!mode) - return; - -@@ -639,6 +636,8 @@ void 
intel_panel_fini(struct intel_connector *connector) - - intel_backlight_destroy(panel); - -+ intel_bios_fini_panel(panel); -+ - list_for_each_entry_safe(fixed_mode, next, &panel->fixed_modes, head) { - list_del(&fixed_mode->head); - drm_mode_destroy(connector->base.dev, fixed_mode); -diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c -index 5a598dd060391..a226e4e5c5698 100644 ---- a/drivers/gpu/drm/i915/display/intel_pps.c -+++ b/drivers/gpu/drm/i915/display/intel_pps.c -@@ -209,7 +209,8 @@ static int - bxt_power_sequencer_idx(struct intel_dp *intel_dp) - { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); -- int backlight_controller = dev_priv->vbt.backlight.controller; -+ struct intel_connector *connector = intel_dp->attached_connector; -+ int backlight_controller = connector->panel.vbt.backlight.controller; - - lockdep_assert_held(&dev_priv->pps_mutex); - -@@ -1159,53 +1160,84 @@ intel_pps_verify_state(struct intel_dp *intel_dp) - } - } - --static void pps_init_delays(struct intel_dp *intel_dp) -+static void pps_init_delays_cur(struct intel_dp *intel_dp, -+ struct edp_power_seq *cur) - { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); -- struct edp_power_seq cur, vbt, spec, -- *final = &intel_dp->pps.pps_delays; - - lockdep_assert_held(&dev_priv->pps_mutex); - -- /* already initialized? */ -- if (final->t11_t12 != 0) -- return; -+ intel_pps_readout_hw_state(intel_dp, cur); -+ -+ intel_pps_dump_state(intel_dp, "cur", cur); -+} - -- intel_pps_readout_hw_state(intel_dp, &cur); -+static void pps_init_delays_vbt(struct intel_dp *intel_dp, -+ struct edp_power_seq *vbt) -+{ -+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); -+ struct intel_connector *connector = intel_dp->attached_connector; - -- intel_pps_dump_state(intel_dp, "cur", &cur); -+ *vbt = connector->panel.vbt.edp.pps; - -- vbt = dev_priv->vbt.edp.pps; - /* On Toshiba Satellite P50-C-18C system the VBT T12 delay - * of 500ms appears to be too short. Ocassionally the panel - * just fails to power back on. Increasing the delay to 800ms - * seems sufficient to avoid this problem. - */ - if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { -- vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); -+ vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10); - drm_dbg_kms(&dev_priv->drm, - "Increasing T12 panel delay as per the quirk to %d\n", -- vbt.t11_t12); -+ vbt->t11_t12); - } -+ - /* T11_T12 delay is special and actually in units of 100ms, but zero - * based in the hw (so we need to add 100 ms). But the sw vbt - * table multiplies it with 1000 to make it in units of 100usec, - * too. */ -- vbt.t11_t12 += 100 * 10; -+ vbt->t11_t12 += 100 * 10; -+ -+ intel_pps_dump_state(intel_dp, "vbt", vbt); -+} -+ -+static void pps_init_delays_spec(struct intel_dp *intel_dp, -+ struct edp_power_seq *spec) -+{ -+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); -+ -+ lockdep_assert_held(&dev_priv->pps_mutex); - - /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of - * our hw here, which are all in 100usec. 
*/ -- spec.t1_t3 = 210 * 10; -- spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */ -- spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ -- spec.t10 = 500 * 10; -+ spec->t1_t3 = 210 * 10; -+ spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */ -+ spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */ -+ spec->t10 = 500 * 10; - /* This one is special and actually in units of 100ms, but zero - * based in the hw (so we need to add 100 ms). But the sw vbt - * table multiplies it with 1000 to make it in units of 100usec, - * too. */ -- spec.t11_t12 = (510 + 100) * 10; -+ spec->t11_t12 = (510 + 100) * 10; -+ -+ intel_pps_dump_state(intel_dp, "spec", spec); -+} -+ -+static void pps_init_delays(struct intel_dp *intel_dp) -+{ -+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); -+ struct edp_power_seq cur, vbt, spec, -+ *final = &intel_dp->pps.pps_delays; -+ -+ lockdep_assert_held(&dev_priv->pps_mutex); -+ -+ /* already initialized? */ -+ if (final->t11_t12 != 0) -+ return; - -- intel_pps_dump_state(intel_dp, "vbt", &vbt); -+ pps_init_delays_cur(intel_dp, &cur); -+ pps_init_delays_vbt(intel_dp, &vbt); -+ pps_init_delays_spec(intel_dp, &spec); - - /* Use the max of the register settings and vbt. If both are - * unset, fall back to the spec limits. */ -diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c -index 06db407e2749f..8f09203e0cf03 100644 ---- a/drivers/gpu/drm/i915/display/intel_psr.c -+++ b/drivers/gpu/drm/i915/display/intel_psr.c -@@ -86,10 +86,13 @@ - - static bool psr_global_enabled(struct intel_dp *intel_dp) - { -+ struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - - switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { - case I915_PSR_DEBUG_DEFAULT: -+ if (i915->params.enable_psr == -1) -+ return connector->panel.vbt.psr.enable; - return i915->params.enable_psr; - case I915_PSR_DEBUG_DISABLE: - return false; -@@ -399,6 +402,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) - - static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) - { -+ struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 val = 0; - -@@ -411,20 +415,20 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) - goto check_tp3_sel; - } - -- if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) -+ if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0) - val |= EDP_PSR_TP1_TIME_0us; -- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) -+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100) - val |= EDP_PSR_TP1_TIME_100us; -- else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) -+ else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500) - val |= EDP_PSR_TP1_TIME_500us; - else - val |= EDP_PSR_TP1_TIME_2500us; - -- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) -+ if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0) - val |= EDP_PSR_TP2_TP3_TIME_0us; -- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) -+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100) - val |= EDP_PSR_TP2_TP3_TIME_100us; -- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) -+ else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500) - val |= EDP_PSR_TP2_TP3_TIME_500us; - else - val |= EDP_PSR_TP2_TP3_TIME_2500us; -@@ -441,13 +445,14 @@ check_tp3_sel: - - static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) - { -+ 
struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int idle_frames; - - /* Let's use 6 as the minimum to cover all known cases including the - * off-by-one issue that HW has in some cases. - */ -- idle_frames = max(6, dev_priv->vbt.psr.idle_frames); -+ idle_frames = max(6, connector->panel.vbt.psr.idle_frames); - idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1); - - if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) -@@ -483,18 +488,19 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) - - static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) - { -+ struct intel_connector *connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 val = 0; - - if (dev_priv->params.psr_safest_params) - return EDP_PSR2_TP2_TIME_2500us; - -- if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && -- dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) -+ if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && -+ connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) - val |= EDP_PSR2_TP2_TIME_50us; -- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) -+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) - val |= EDP_PSR2_TP2_TIME_100us; -- else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) -+ else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) - val |= EDP_PSR2_TP2_TIME_500us; - else - val |= EDP_PSR2_TP2_TIME_2500us; -@@ -2344,6 +2350,7 @@ unlock: - */ - void intel_psr_init(struct intel_dp *intel_dp) - { -+ struct intel_connector *connector = intel_dp->attached_connector; - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - -@@ -2367,14 +2374,10 @@ void intel_psr_init(struct intel_dp *intel_dp) - - intel_dp->psr.source_support = true; - -- if (dev_priv->params.enable_psr == -1) -- if (!dev_priv->vbt.psr.enable) -- dev_priv->params.enable_psr = 0; -- - /* Set link_standby x link_off defaults */ - if (DISPLAY_VER(dev_priv) < 12) - /* For new platforms up to TGL let's respect VBT back again */ -- intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link; -+ intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link; - - INIT_WORK(&intel_dp->psr.work, intel_psr_work); - INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work); -diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c -index d81855d57cdc9..14a64bd61176d 100644 ---- a/drivers/gpu/drm/i915/display/intel_sdvo.c -+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c -@@ -2869,6 +2869,7 @@ static bool - intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) - { - struct drm_encoder *encoder = &intel_sdvo->base.base; -+ struct drm_i915_private *i915 = to_i915(encoder->dev); - struct drm_connector *connector; - struct intel_connector *intel_connector; - struct intel_sdvo_connector *intel_sdvo_connector; -@@ -2900,6 +2901,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) - if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) - goto err; - -+ intel_bios_init_panel(i915, &intel_connector->panel); -+ - /* - * Fetch modes from VBT. For SDVO prefer the VBT mode since some - * SDVO->LVDS transcoders can't cope with the EDID mode. 
-diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c -index 1954f07f0d3ec..02f75e95b2ec1 100644 ---- a/drivers/gpu/drm/i915/display/vlv_dsi.c -+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c -@@ -782,6 +782,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, - { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); -+ struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - enum port port; -@@ -838,7 +839,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, - * the delay in that case. If there is no deassert-seq, then an - * unconditional msleep is used to give the panel time to power-on. - */ -- if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) { -+ if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) { - intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); - } else { -@@ -1690,7 +1691,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) - { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); -- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; -+ struct intel_connector *connector = intel_dsi->attached_connector; -+ struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; - u32 tlpx_ns, extra_byte_count, tlpx_ui; - u32 ui_num, ui_den; - u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; -@@ -1924,13 +1926,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) - - intel_dsi->panel_power_off_time = ktime_get_boottime(); - -- if (dev_priv->vbt.dsi.config->dual_link) -+ intel_bios_init_panel(dev_priv, &intel_connector->panel); -+ -+ if (intel_connector->panel.vbt.dsi.config->dual_link) - intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); - else - intel_dsi->ports = BIT(port); - -- intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports; -- intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports; -+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) -+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; -+ -+ intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports; -+ -+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) -+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; -+ -+ intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports; - - /* Create a DSI host (and a device) for each port. 
*/ - for_each_dsi_port(port, intel_dsi->ports) { -diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c -index 321af109d484f..8da42af0256ab 100644 ---- a/drivers/gpu/drm/i915/gem/i915_gem_context.c -+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c -@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work) - trace_i915_context_free(ctx); - GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); - -+ spin_lock(&ctx->i915->gem.contexts.lock); -+ list_del(&ctx->link); -+ spin_unlock(&ctx->i915->gem.contexts.lock); -+ - if (ctx->syncobj) - drm_syncobj_put(ctx->syncobj); - -@@ -1514,10 +1518,6 @@ static void context_close(struct i915_gem_context *ctx) - - ctx->file_priv = ERR_PTR(-EBADF); - -- spin_lock(&ctx->i915->gem.contexts.lock); -- list_del(&ctx->link); -- spin_unlock(&ctx->i915->gem.contexts.lock); -- - client = ctx->client; - if (client) { - spin_lock(&client->ctx_lock); -diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index 5184d70d48382..554d79bc0312d 100644 ---- a/drivers/gpu/drm/i915/i915_drv.h -+++ b/drivers/gpu/drm/i915/i915_drv.h -@@ -194,12 +194,6 @@ struct drm_i915_display_funcs { - - #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ - --enum drrs_type { -- DRRS_TYPE_NONE, -- DRRS_TYPE_STATIC, -- DRRS_TYPE_SEAMLESS, --}; -- - #define QUIRK_LVDS_SSC_DISABLE (1<<1) - #define QUIRK_INVERT_BRIGHTNESS (1<<2) - #define QUIRK_BACKLIGHT_PRESENT (1<<3) -@@ -308,76 +302,19 @@ struct intel_vbt_data { - /* bdb version */ - u16 version; - -- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ -- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ -- - /* Feature bits */ - unsigned int int_tv_support:1; -- unsigned int lvds_dither:1; - unsigned int int_crt_support:1; - unsigned int lvds_use_ssc:1; - unsigned int int_lvds_support:1; - unsigned int display_clock_mode:1; - unsigned int fdi_rx_polarity_inverted:1; -- unsigned int panel_type:4; - int lvds_ssc_freq; -- unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ - enum drm_panel_orientation orientation; - - bool override_afc_startup; - u8 override_afc_startup_val; - -- u8 seamless_drrs_min_refresh_rate; -- enum drrs_type drrs_type; -- -- struct { -- int rate; -- int lanes; -- int preemphasis; -- int vswing; -- int bpp; -- struct edp_power_seq pps; -- u8 drrs_msa_timing_delay; -- bool low_vswing; -- bool initialized; -- bool hobl; -- } edp; -- -- struct { -- bool enable; -- bool full_link; -- bool require_aux_wakeup; -- int idle_frames; -- int tp1_wakeup_time_us; -- int tp2_tp3_wakeup_time_us; -- int psr2_tp2_tp3_wakeup_time_us; -- } psr; -- -- struct { -- u16 pwm_freq_hz; -- u16 brightness_precision_bits; -- bool present; -- bool active_low_pwm; -- u8 min_brightness; /* min_brightness/255 of max */ -- u8 controller; /* brightness controller number */ -- enum intel_backlight_type type; -- } backlight; -- -- /* MIPI DSI */ -- struct { -- u16 panel_id; -- struct mipi_config *config; -- struct mipi_pps_data *pps; -- u16 bl_ports; -- u16 cabc_ports; -- u8 seq_version; -- u32 size; -- u8 *data; -- const u8 *sequence[MIPI_SEQ_MAX]; -- u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ -- enum drm_panel_orientation orientation; -- } dsi; -- - int crt_ddc_pin; - - struct list_head display_devices; -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c -index 702e5b89be226..b605d0ceaefad 100644 ---- a/drivers/gpu/drm/i915/i915_gem.c -+++ 
b/drivers/gpu/drm/i915/i915_gem.c -@@ -1191,7 +1191,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) - - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); - -- i915_gem_drain_freed_objects(dev_priv); -+ /* Flush any outstanding work, including i915_gem_context.release_work. */ -+ i915_gem_drain_workqueue(dev_priv); - - drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list)); - } -diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c -index 5d7504a72b11c..e244aa408d9d4 100644 ---- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c -+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c -@@ -151,7 +151,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w, - { - struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - -- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); -+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); - mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, - DISP_REG_DITHER_CFG); - mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, -diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c -index af2f123e9a9a9..9a3b86c29b503 100644 ---- a/drivers/gpu/drm/mediatek/mtk_dsi.c -+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c -@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) - if (--dsi->refcount != 0) - return; - -+ /* -+ * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since -+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), -+ * which needs irq for vblank, and mtk_dsi_stop() will disable irq. -+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), -+ * after dsi is fully set. -+ */ -+ mtk_dsi_stop(dsi); -+ -+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); - mtk_dsi_reset_engine(dsi); - mtk_dsi_lane0_ulp_mode_enter(dsi); - mtk_dsi_clk_ulp_mode_enter(dsi); -@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) - if (!dsi->enabled) - return; - -- /* -- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since -- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(), -- * which needs irq for vblank, and mtk_dsi_stop() will disable irq. -- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(), -- * after dsi is fully set. 
-- */ -- mtk_dsi_stop(dsi); -- -- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500); -- - dsi->enabled = false; - } - -@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, - - static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = { - .attach = mtk_dsi_bridge_attach, -+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_disable = mtk_dsi_bridge_atomic_disable, -+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, - .atomic_enable = mtk_dsi_bridge_atomic_enable, - .atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable, - .atomic_post_disable = mtk_dsi_bridge_atomic_post_disable, -+ .atomic_reset = drm_atomic_helper_bridge_reset, - .mode_set = mtk_dsi_bridge_mode_set, - }; - -diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c -index 4a2e580a2f7b7..0e001ce8a40fd 100644 ---- a/drivers/gpu/drm/panel/panel-simple.c -+++ b/drivers/gpu/drm/panel/panel-simple.c -@@ -2136,7 +2136,7 @@ static const struct panel_desc innolux_g121i1_l01 = { - .enable = 200, - .disable = 20, - }, -- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, -+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, - .connector_type = DRM_MODE_CONNECTOR_LVDS, - }; - -diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c -index c204e9b95c1f7..518ee13b1d6f4 100644 ---- a/drivers/gpu/drm/rockchip/cdn-dp-core.c -+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c -@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector) - return ret; - } - --static int cdn_dp_connector_mode_valid(struct drm_connector *connector, -- struct drm_display_mode *mode) -+static enum drm_mode_status -+cdn_dp_connector_mode_valid(struct drm_connector *connector, -+ struct drm_display_mode *mode) - { - struct cdn_dp_device *dp = connector_to_dp(connector); - struct drm_display_info *display_info = &dp->connector.display_info; -diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c -index 547ae334e5cd8..027029efb0088 100644 ---- a/drivers/hv/vmbus_drv.c -+++ b/drivers/hv/vmbus_drv.c -@@ -2309,7 +2309,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, - bool fb_overlap_ok) - { - struct resource *iter, *shadow; -- resource_size_t range_min, range_max, start; -+ resource_size_t range_min, range_max, start, end; - const char *dev_n = dev_name(&device_obj->device); - int retval; - -@@ -2344,6 +2344,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, - range_max = iter->end; - start = (range_min + align - 1) & ~(align - 1); - for (; start + size - 1 <= range_max; start += align) { -+ end = start + size - 1; -+ -+ /* Skip the whole fb_mmio region if not fb_overlap_ok */ -+ if (!fb_overlap_ok && fb_mmio && -+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) || -+ ((end >= fb_mmio->start) && (end <= fb_mmio->end)))) -+ continue; -+ - shadow = __request_region(iter, start, size, NULL, - IORESOURCE_BUSY); - if (!shadow) -diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c -index e47fa34656717..3082183bd66a4 100644 ---- a/drivers/i2c/busses/i2c-imx.c -+++ b/drivers/i2c/busses/i2c-imx.c -@@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev) - if (i2c_imx->dma) - i2c_imx_dma_free(i2c_imx); - -- if (ret == 0) { -+ if (ret >= 0) { - /* setup chip registers to defaults */ - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); -diff --git 
a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c -index 8716032f030a0..ad5efd7497d1c 100644 ---- a/drivers/i2c/busses/i2c-mlxbf.c -+++ b/drivers/i2c/busses/i2c-mlxbf.c -@@ -6,6 +6,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -63,13 +64,14 @@ - */ - #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) - /* Reference clock for Bluefield - 156 MHz. */ --#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000) -+#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL - - /* Constant used to determine the PLL frequency. */ --#define MLNXBF_I2C_COREPLL_CONST 16384 -+#define MLNXBF_I2C_COREPLL_CONST 16384ULL -+ -+#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL - - /* PLL registers. */ --#define MLXBF_I2C_CORE_PLL_REG0 0x0 - #define MLXBF_I2C_CORE_PLL_REG1 0x4 - #define MLXBF_I2C_CORE_PLL_REG2 0x8 - -@@ -181,22 +183,15 @@ - #define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ - - /* Core PLL TYU configuration. */ --#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0) --#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0) --#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0) -- --#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3 --#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16 --#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20 -+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3) -+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16) -+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20) - - /* Core PLL YU configuration. */ - #define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0) - #define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0) --#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0) -+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26) - --#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0 --#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1 --#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26 - - /* Core PLL frequency. */ - static u64 mlxbf_i2c_corepll_frequency; -@@ -479,8 +474,6 @@ static struct mutex mlxbf_i2c_bus_lock; - #define MLXBF_I2C_MASK_8 GENMASK(7, 0) - #define MLXBF_I2C_MASK_16 GENMASK(15, 0) - --#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000 -- - /* - * Function to poll a set of bits at a specific address; it checks whether - * the bits are equal to zero when eq_zero is set to 'true', and not equal -@@ -669,7 +662,7 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, - /* Clear status bits. */ - writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS); - /* Set the cause data. */ -- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR); -+ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); - /* Zero PEC byte. */ - writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC); - /* Zero byte count. 
*/ -@@ -738,6 +731,9 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, - if (flags & MLXBF_I2C_F_WRITE) { - write_en = 1; - write_len += operation->length; -+ if (data_idx + operation->length > -+ MLXBF_I2C_MASTER_DATA_DESC_SIZE) -+ return -ENOBUFS; - memcpy(data_desc + data_idx, - operation->buffer, operation->length); - data_idx += operation->length; -@@ -1407,24 +1403,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev, - return 0; - } - --static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) -+static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) - { -- u64 core_frequency, pad_frequency; -+ u64 core_frequency; - u8 core_od, core_r; - u32 corepll_val; - u16 core_f; - -- pad_frequency = MLXBF_I2C_PLL_IN_FREQ; -- - corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); - - /* Get Core PLL configuration bits. */ -- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) & -- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK; -- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) & -- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK; -- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) & -- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK; -+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val); -+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val); -+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val); - - /* - * Compute PLL output frequency as follow: -@@ -1436,31 +1427,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) - * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency - * and PadFrequency, respectively. - */ -- core_frequency = pad_frequency * (++core_f); -+ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); - core_frequency /= (++core_r) * (++core_od); - - return core_frequency; - } - --static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) -+static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) - { - u32 corepll_reg1_val, corepll_reg2_val; -- u64 corepll_frequency, pad_frequency; -+ u64 corepll_frequency; - u8 core_od, core_r; - u32 core_f; - -- pad_frequency = MLXBF_I2C_PLL_IN_FREQ; -- - corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); - corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); - - /* Get Core PLL configuration bits */ -- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) & -- MLXBF_I2C_COREPLL_CORE_F_YU_MASK; -- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) & -- MLXBF_I2C_COREPLL_CORE_R_YU_MASK; -- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) & -- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK; -+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val); -+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val); -+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val); - - /* - * Compute PLL output frequency as follow: -@@ -1472,7 +1458,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) - * Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency - * and PadFrequency, respectively. 
- */ -- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST; -+ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; - corepll_frequency /= (++core_r) * (++core_od); - - return corepll_frequency; -@@ -2180,14 +2166,14 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = { - [1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1], - [2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1] - }, -- .calculate_freq = mlxbf_calculate_freq_from_tyu -+ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu - }, - [MLXBF_I2C_CHIP_TYPE_2] = { - .type = MLXBF_I2C_CHIP_TYPE_2, - .shared_res = { - [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2] - }, -- .calculate_freq = mlxbf_calculate_freq_from_yu -+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu - } - }; - -diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c -index 774507b54b57b..313904be5f3bd 100644 ---- a/drivers/i2c/i2c-mux.c -+++ b/drivers/i2c/i2c-mux.c -@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, - int (*deselect)(struct i2c_mux_core *, u32)) - { - struct i2c_mux_core *muxc; -+ size_t mux_size; - -- muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters) -- + sizeof_priv, GFP_KERNEL); -+ mux_size = struct_size(muxc, adapter, max_adapters); -+ muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL); - if (!muxc) - return NULL; - if (sizeof_priv) -diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c -index 861a239d905a4..3ed15e8ca6775 100644 ---- a/drivers/iommu/intel/iommu.c -+++ b/drivers/iommu/intel/iommu.c -@@ -419,7 +419,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) - { - unsigned long fl_sagaw, sl_sagaw; - -- fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0); -+ fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0); - sl_sagaw = cap_sagaw(iommu->cap); - - /* Second level only. 
*/ -diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c -index 7835bb0f32fc3..e012b21c4fd7a 100644 ---- a/drivers/media/usb/b2c2/flexcop-usb.c -+++ b/drivers/media/usb/b2c2/flexcop-usb.c -@@ -511,7 +511,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb) - - if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1) - return -ENODEV; -- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc)) -+ if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[0].desc)) - return -ENODEV; - - switch (fc_usb->udev->speed) { -diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c -index f8fdf88fb240c..ecbc46714e681 100644 ---- a/drivers/memstick/core/ms_block.c -+++ b/drivers/memstick/core/ms_block.c -@@ -2188,7 +2188,6 @@ static void msb_remove(struct memstick_dev *card) - - /* Remove the disk */ - del_gendisk(msb->disk); -- blk_cleanup_queue(msb->queue); - blk_mq_free_tag_set(&msb->tag_set); - msb->queue = NULL; - -diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c -index 725ba74ded308..72e91c06c618b 100644 ---- a/drivers/memstick/core/mspro_block.c -+++ b/drivers/memstick/core/mspro_block.c -@@ -1294,7 +1294,6 @@ static void mspro_block_remove(struct memstick_dev *card) - del_gendisk(msb->disk); - dev_dbg(&card->dev, "mspro block remove\n"); - -- blk_cleanup_queue(msb->queue); - blk_mq_free_tag_set(&msb->tag_set); - msb->queue = NULL; - -diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c -index 912a398a9a764..2f89ae55c1773 100644 ---- a/drivers/mmc/core/block.c -+++ b/drivers/mmc/core/block.c -@@ -2509,7 +2509,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, - return md; - - err_cleanup_queue: -- blk_cleanup_queue(md->disk->queue); - blk_mq_free_tag_set(&md->queue.tag_set); - err_kfree: - kfree(md); -diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c -index fa5324ceeebe4..f824cfdab75ac 100644 ---- a/drivers/mmc/core/queue.c -+++ b/drivers/mmc/core/queue.c -@@ -494,7 +494,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq) - if (blk_queue_quiesced(q)) - blk_mq_unquiesce_queue(q); - -- blk_cleanup_queue(q); - blk_mq_free_tag_set(&mq->tag_set); - - /* -diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c -index 1f0120cbe9e80..8ad095c19f271 100644 ---- a/drivers/net/bonding/bond_3ad.c -+++ b/drivers/net/bonding/bond_3ad.c -@@ -87,8 +87,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = { - static u16 ad_ticks_per_sec; - static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; - --static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = -- MULTICAST_LACPDU_ADDR; -+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = { -+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 -+}; - - /* ================= main 802.3ad protocol functions ================== */ - static int ad_lacpdu_send(struct port *port); -diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c -index bff0bfd10e235..ab7cb48f8dfdd 100644 ---- a/drivers/net/bonding/bond_main.c -+++ b/drivers/net/bonding/bond_main.c -@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, - dev_uc_unsync(slave_dev, bond_dev); - dev_mc_unsync(slave_dev, bond_dev); - -- if (BOND_MODE(bond) == BOND_MODE_8023AD) { -- /* del lacpdu mc addr from mc list */ -- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; -- -- dev_mc_del(slave_dev, lacpdu_multicast); -- } -+ if 
(BOND_MODE(bond) == BOND_MODE_8023AD) -+ dev_mc_del(slave_dev, lacpdu_mcast_addr); - } - - /*--------------------------- Active slave change ---------------------------*/ -@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, - if (bond->dev->flags & IFF_ALLMULTI) - dev_set_allmulti(old_active->dev, -1); - -- bond_hw_addr_flush(bond->dev, old_active->dev); -+ if (bond->dev->flags & IFF_UP) -+ bond_hw_addr_flush(bond->dev, old_active->dev); - } - - if (new_active) { -@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, - if (bond->dev->flags & IFF_ALLMULTI) - dev_set_allmulti(new_active->dev, 1); - -- netif_addr_lock_bh(bond->dev); -- dev_uc_sync(new_active->dev, bond->dev); -- dev_mc_sync(new_active->dev, bond->dev); -- netif_addr_unlock_bh(bond->dev); -+ if (bond->dev->flags & IFF_UP) { -+ netif_addr_lock_bh(bond->dev); -+ dev_uc_sync(new_active->dev, bond->dev); -+ dev_mc_sync(new_active->dev, bond->dev); -+ netif_addr_unlock_bh(bond->dev); -+ } - } - } - -@@ -2139,16 +2138,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, - } - } - -- netif_addr_lock_bh(bond_dev); -- dev_mc_sync_multiple(slave_dev, bond_dev); -- dev_uc_sync_multiple(slave_dev, bond_dev); -- netif_addr_unlock_bh(bond_dev); -- -- if (BOND_MODE(bond) == BOND_MODE_8023AD) { -- /* add lacpdu mc addr to mc list */ -- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; -+ if (bond_dev->flags & IFF_UP) { -+ netif_addr_lock_bh(bond_dev); -+ dev_mc_sync_multiple(slave_dev, bond_dev); -+ dev_uc_sync_multiple(slave_dev, bond_dev); -+ netif_addr_unlock_bh(bond_dev); - -- dev_mc_add(slave_dev, lacpdu_multicast); -+ if (BOND_MODE(bond) == BOND_MODE_8023AD) -+ dev_mc_add(slave_dev, lacpdu_mcast_addr); - } - } - -@@ -2420,7 +2417,8 @@ static int __bond_release_one(struct net_device *bond_dev, - if (old_flags & IFF_ALLMULTI) - dev_set_allmulti(slave_dev, -1); - -- bond_hw_addr_flush(bond_dev, slave_dev); -+ if (old_flags & IFF_UP) -+ bond_hw_addr_flush(bond_dev, slave_dev); - } - - slave_disable_netpoll(slave); -@@ -4157,6 +4155,12 @@ static int bond_open(struct net_device *bond_dev) - struct list_head *iter; - struct slave *slave; - -+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { -+ bond->rr_tx_counter = alloc_percpu(u32); -+ if (!bond->rr_tx_counter) -+ return -ENOMEM; -+ } -+ - /* reset slave->backup and slave->inactive */ - if (bond_has_slaves(bond)) { - bond_for_each_slave(bond, slave, iter) { -@@ -4194,6 +4198,9 @@ static int bond_open(struct net_device *bond_dev) - /* register to receive LACPDUs */ - bond->recv_probe = bond_3ad_lacpdu_recv; - bond_3ad_initiate_agg_selection(bond, 1); -+ -+ bond_for_each_slave(bond, slave, iter) -+ dev_mc_add(slave->dev, lacpdu_mcast_addr); - } - - if (bond_mode_can_use_xmit_hash(bond)) -@@ -4205,6 +4212,7 @@ static int bond_open(struct net_device *bond_dev) - static int bond_close(struct net_device *bond_dev) - { - struct bonding *bond = netdev_priv(bond_dev); -+ struct slave *slave; - - bond_work_cancel_all(bond); - bond->send_peer_notif = 0; -@@ -4212,6 +4220,19 @@ static int bond_close(struct net_device *bond_dev) - bond_alb_deinitialize(bond); - bond->recv_probe = NULL; - -+ if (bond_uses_primary(bond)) { -+ rcu_read_lock(); -+ slave = rcu_dereference(bond->curr_active_slave); -+ if (slave) -+ bond_hw_addr_flush(bond_dev, slave->dev); -+ rcu_read_unlock(); -+ } else { -+ struct list_head *iter; -+ -+ bond_for_each_slave(bond, slave, iter) -+ 
bond_hw_addr_flush(bond_dev, slave->dev); -+ } -+ - return 0; - } - -@@ -6195,15 +6216,6 @@ static int bond_init(struct net_device *bond_dev) - if (!bond->wq) - return -ENOMEM; - -- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) { -- bond->rr_tx_counter = alloc_percpu(u32); -- if (!bond->rr_tx_counter) { -- destroy_workqueue(bond->wq); -- bond->wq = NULL; -- return -ENOMEM; -- } -- } -- - spin_lock_init(&bond->stats_lock); - netdev_lockdep_set_classes(bond_dev); - -diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c -index d060088047f16..131467d37a45b 100644 ---- a/drivers/net/can/flexcan/flexcan-core.c -+++ b/drivers/net/can/flexcan/flexcan-core.c -@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, - u32 reg_ctrl, reg_id, reg_iflag1; - int i; - -- if (unlikely(drop)) { -- skb = ERR_PTR(-ENOBUFS); -- goto mark_as_read; -- } -- - mb = flexcan_get_mb(priv, n); - - if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { -@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, - reg_ctrl = priv->read(&mb->can_ctrl); - } - -+ if (unlikely(drop)) { -+ skb = ERR_PTR(-ENOBUFS); -+ goto mark_as_read; -+ } -+ - if (reg_ctrl & FLEXCAN_MB_CNT_EDL) - skb = alloc_canfd_skb(offload->dev, &cfd); - else -diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c -index d3a658b444b5f..092cd51b3926e 100644 ---- a/drivers/net/can/usb/gs_usb.c -+++ b/drivers/net/can/usb/gs_usb.c -@@ -824,6 +824,7 @@ static int gs_can_open(struct net_device *netdev) - flags |= GS_CAN_MODE_TRIPLE_SAMPLE; - - /* finally start device */ -+ dev->can.state = CAN_STATE_ERROR_ACTIVE; - dm->mode = cpu_to_le32(GS_CAN_MODE_START); - dm->flags = cpu_to_le32(flags); - rc = usb_control_msg(interface_to_usbdev(dev->iface), -@@ -835,13 +836,12 @@ static int gs_can_open(struct net_device *netdev) - if (rc < 0) { - netdev_err(netdev, "Couldn't start device (err=%d)\n", rc); - kfree(dm); -+ dev->can.state = CAN_STATE_STOPPED; - return rc; - } - - kfree(dm); - -- dev->can.state = CAN_STATE_ERROR_ACTIVE; -- - parent->active_channels++; - if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) - netif_start_queue(netdev); -diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c -index 964354536f9ce..111a952f880ee 100644 ---- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c -+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c -@@ -662,7 +662,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) - - for (i = 0; i < nr_pkts; i++) { - struct bnxt_sw_tx_bd *tx_buf; -- bool compl_deferred = false; - struct sk_buff *skb; - int j, last; - -@@ -671,6 +670,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) - skb = tx_buf->skb; - tx_buf->skb = NULL; - -+ tx_bytes += skb->len; -+ - if (tx_buf->is_push) { - tx_buf->is_push = 0; - goto next_tx_int; -@@ -691,8 +692,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) - } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { - if (bp->flags & BNXT_FLAG_CHIP_P5) { -+ /* PTP worker takes ownership of the skb */ - if (!bnxt_get_tx_ts_p5(bp, skb)) -- compl_deferred = true; -+ skb = NULL; - else - atomic_inc(&bp->ptp_cfg->tx_avail); - } -@@ -701,9 +703,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) - next_tx_int: - cons = NEXT_TX(cons); - -- tx_bytes += skb->len; -- if (!compl_deferred) -- 
dev_kfree_skb_any(skb); -+ dev_kfree_skb_any(skb); - } - - netdev_tx_completed_queue(txq, nr_pkts, tx_bytes); -diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c -index 7f3c0875b6f58..8e316367f6ced 100644 ---- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c -+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c -@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp) - - if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters & - (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | -- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) { -+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) { - ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | -- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE); -+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE); - netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n"); - } - -diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile -index a139f2e9d59f0..e0e8dfd137930 100644 ---- a/drivers/net/ethernet/freescale/enetc/Makefile -+++ b/drivers/net/ethernet/freescale/enetc/Makefile -@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o - - obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o - fsl-enetc-vf-y := enetc_vf.o $(common-objs) --fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o - - obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o - fsl-enetc-ierb-y := enetc_ierb.o -diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c -index 4470a4a3e4c3e..9f5b921039bd4 100644 ---- a/drivers/net/ethernet/freescale/enetc/enetc.c -+++ b/drivers/net/ethernet/freescale/enetc/enetc.c -@@ -2432,7 +2432,7 @@ int enetc_close(struct net_device *ndev) - return 0; - } - --static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) -+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) - { - struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct tc_mqprio_qopt *mqprio = type_data; -@@ -2486,25 +2486,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) - return 0; - } - --int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, -- void *type_data) --{ -- switch (type) { -- case TC_SETUP_QDISC_MQPRIO: -- return enetc_setup_tc_mqprio(ndev, type_data); -- case TC_SETUP_QDISC_TAPRIO: -- return enetc_setup_tc_taprio(ndev, type_data); -- case TC_SETUP_QDISC_CBS: -- return enetc_setup_tc_cbs(ndev, type_data); -- case TC_SETUP_QDISC_ETF: -- return enetc_setup_tc_txtime(ndev, type_data); -- case TC_SETUP_BLOCK: -- return enetc_setup_tc_psfp(ndev, type_data); -- default: -- return -EOPNOTSUPP; -- } --} -- - static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, - struct netlink_ext_ack *extack) - { -@@ -2600,29 +2581,6 @@ static int enetc_set_rss(struct net_device *ndev, int en) - return 0; - } - --static int enetc_set_psfp(struct net_device *ndev, int en) --{ -- struct enetc_ndev_priv *priv = netdev_priv(ndev); -- int err; -- -- if (en) { -- err = enetc_psfp_enable(priv); -- if (err) -- return err; -- -- priv->active_offloads |= ENETC_F_QCI; -- return 0; -- } -- -- err = enetc_psfp_disable(priv); -- if (err) -- return err; -- -- priv->active_offloads &= ~ENETC_F_QCI; -- -- return 0; --} -- - static void enetc_enable_rxvlan(struct net_device *ndev, bool en) - { - struct enetc_ndev_priv *priv = netdev_priv(ndev); -@@ -2641,11 +2599,9 @@ static void 
enetc_enable_txvlan(struct net_device *ndev, bool en) - enetc_bdr_enable_txvlan(&priv->si->hw, i, en); - } - --int enetc_set_features(struct net_device *ndev, -- netdev_features_t features) -+void enetc_set_features(struct net_device *ndev, netdev_features_t features) - { - netdev_features_t changed = ndev->features ^ features; -- int err = 0; - - if (changed & NETIF_F_RXHASH) - enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); -@@ -2657,11 +2613,6 @@ int enetc_set_features(struct net_device *ndev, - if (changed & NETIF_F_HW_VLAN_CTAG_TX) - enetc_enable_txvlan(ndev, - !!(features & NETIF_F_HW_VLAN_CTAG_TX)); -- -- if (changed & NETIF_F_HW_TC) -- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); -- -- return err; - } - - #ifdef CONFIG_FSL_ENETC_PTP_CLOCK -diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h -index 29922c20531f0..2cfe6944ebd32 100644 ---- a/drivers/net/ethernet/freescale/enetc/enetc.h -+++ b/drivers/net/ethernet/freescale/enetc/enetc.h -@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev); - void enetc_stop(struct net_device *ndev); - netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); - struct net_device_stats *enetc_get_stats(struct net_device *ndev); --int enetc_set_features(struct net_device *ndev, -- netdev_features_t features); -+void enetc_set_features(struct net_device *ndev, netdev_features_t features); - int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); --int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type, -- void *type_data); -+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data); - int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp); - int enetc_xdp_xmit(struct net_device *ndev, int num_frames, - struct xdp_frame **frames, u32 flags); -@@ -465,6 +463,7 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data); - int enetc_psfp_init(struct enetc_ndev_priv *priv); - int enetc_psfp_clean(struct enetc_ndev_priv *priv); -+int enetc_set_psfp(struct net_device *ndev, bool en); - - static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv) - { -@@ -540,4 +539,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv) - { - return 0; - } -+ -+static inline int enetc_set_psfp(struct net_device *ndev, bool en) -+{ -+ return 0; -+} - #endif -diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c -index c4a0e836d4f09..bb7750222691d 100644 ---- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c -+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c -@@ -709,6 +709,13 @@ static int enetc_pf_set_features(struct net_device *ndev, - { - netdev_features_t changed = ndev->features ^ features; - struct enetc_ndev_priv *priv = netdev_priv(ndev); -+ int err; -+ -+ if (changed & NETIF_F_HW_TC) { -+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); -+ if (err) -+ return err; -+ } - - if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { - struct enetc_pf *pf = enetc_si_priv(priv->si); -@@ -722,7 +729,28 @@ static int enetc_pf_set_features(struct net_device *ndev, - if (changed & NETIF_F_LOOPBACK) - enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); - -- return enetc_set_features(ndev, features); -+ enetc_set_features(ndev, features); -+ -+ return 0; -+} -+ -+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type, -+ void *type_data) -+{ -+ switch 
(type) { -+ case TC_SETUP_QDISC_MQPRIO: -+ return enetc_setup_tc_mqprio(ndev, type_data); -+ case TC_SETUP_QDISC_TAPRIO: -+ return enetc_setup_tc_taprio(ndev, type_data); -+ case TC_SETUP_QDISC_CBS: -+ return enetc_setup_tc_cbs(ndev, type_data); -+ case TC_SETUP_QDISC_ETF: -+ return enetc_setup_tc_txtime(ndev, type_data); -+ case TC_SETUP_BLOCK: -+ return enetc_setup_tc_psfp(ndev, type_data); -+ default: -+ return -EOPNOTSUPP; -+ } - } - - static const struct net_device_ops enetc_ndev_ops = { -@@ -739,7 +767,7 @@ static const struct net_device_ops enetc_ndev_ops = { - .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, - .ndo_set_features = enetc_pf_set_features, - .ndo_eth_ioctl = enetc_ioctl, -- .ndo_setup_tc = enetc_setup_tc, -+ .ndo_setup_tc = enetc_pf_setup_tc, - .ndo_bpf = enetc_setup_bpf, - .ndo_xdp_xmit = enetc_xdp_xmit, - }; -diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c -index 582a663ed0ba4..f8a2f02ce22de 100644 ---- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c -+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c -@@ -1517,6 +1517,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - } - } - -+int enetc_set_psfp(struct net_device *ndev, bool en) -+{ -+ struct enetc_ndev_priv *priv = netdev_priv(ndev); -+ int err; -+ -+ if (en) { -+ err = enetc_psfp_enable(priv); -+ if (err) -+ return err; -+ -+ priv->active_offloads |= ENETC_F_QCI; -+ return 0; -+ } -+ -+ err = enetc_psfp_disable(priv); -+ if (err) -+ return err; -+ -+ priv->active_offloads &= ~ENETC_F_QCI; -+ -+ return 0; -+} -+ - int enetc_psfp_init(struct enetc_ndev_priv *priv) - { - if (epsfp.psfp_sfi_bitmap) -diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c -index 17924305afa2f..dfcaac302e245 100644 ---- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c -+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c -@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) - static int enetc_vf_set_features(struct net_device *ndev, - netdev_features_t features) - { -- return enetc_set_features(ndev, features); -+ enetc_set_features(ndev, features); -+ -+ return 0; -+} -+ -+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type, -+ void *type_data) -+{ -+ switch (type) { -+ case TC_SETUP_QDISC_MQPRIO: -+ return enetc_setup_tc_mqprio(ndev, type_data); -+ default: -+ return -EOPNOTSUPP; -+ } - } - - /* Probing/ Init */ -@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = { - .ndo_set_mac_address = enetc_vf_set_mac_addr, - .ndo_set_features = enetc_vf_set_features, - .ndo_eth_ioctl = enetc_ioctl, -- .ndo_setup_tc = enetc_setup_tc, -+ .ndo_setup_tc = enetc_vf_setup_tc, - }; - - static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, -diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c -index 8c939628e2d85..2e6461b0ea8bc 100644 ---- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c -+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c -@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv, - int err; - - err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page, -- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL); -+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC); - if (err) - return err; - -diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c -index 
1aaf0c5ddf6cf..57e27f2024d38 100644 ---- a/drivers/net/ethernet/intel/i40e/i40e_main.c -+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c -@@ -5785,6 +5785,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi) - } - } - -+/** -+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits -+ * @vsi: Pointer to vsi structure -+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits -+ * -+ * Helper function to convert units before send to set BW limit -+ **/ -+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) -+{ -+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { -+ dev_warn(&vsi->back->pdev->dev, -+ "Setting max tx rate to minimum usable value of 50Mbps.\n"); -+ max_tx_rate = I40E_BW_CREDIT_DIVISOR; -+ } else { -+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); -+ } -+ -+ return max_tx_rate; -+} -+ - /** - * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate - * @vsi: VSI to be configured -@@ -5807,10 +5827,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) - max_tx_rate, seid); - return -EINVAL; - } -- if (max_tx_rate && max_tx_rate < 50) { -+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { - dev_warn(&pf->pdev->dev, - "Setting max tx rate to minimum usable value of 50Mbps.\n"); -- max_tx_rate = 50; -+ max_tx_rate = I40E_BW_CREDIT_DIVISOR; - } - - /* Tx rate credits are in values of 50Mbps, 0 is disabled */ -@@ -8101,9 +8121,9 @@ config_tc: - - if (i40e_is_tc_mqprio_enabled(pf)) { - if (vsi->mqprio_qopt.max_rate[0]) { -- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; -+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, -+ vsi->mqprio_qopt.max_rate[0]); - -- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); - ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); - if (!ret) { - u64 credits = max_tx_rate; -@@ -10848,10 +10868,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) - } - - if (vsi->mqprio_qopt.max_rate[0]) { -- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; -+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, -+ vsi->mqprio_qopt.max_rate[0]); - u64 credits = 0; - -- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); - ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); - if (ret) - goto end_unlock; -diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c -index 86b0f21287dc8..67fbaaad39859 100644 ---- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c -+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c -@@ -2038,6 +2038,25 @@ static void i40e_del_qch(struct i40e_vf *vf) - } - } - -+/** -+ * i40e_vc_get_max_frame_size -+ * @vf: pointer to the VF -+ * -+ * Max frame size is determined based on the current port's max frame size and -+ * whether a port VLAN is configured on this VF. The VF is not aware whether -+ * it's in a port VLAN so the PF needs to account for this in max frame size -+ * checks and sending the max frame size to the VF. 
-+ **/ -+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf) -+{ -+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size; -+ -+ if (vf->port_vlan_id) -+ max_frame_size -= VLAN_HLEN; -+ -+ return max_frame_size; -+} -+ - /** - * i40e_vc_get_vf_resources_msg - * @vf: pointer to the VF info -@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) - vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; - vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; - vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; -+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf); - - if (vf->lan_vsi_idx) { - vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; -diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c -index 06d18797d25a2..18b6a702a1d6d 100644 ---- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c -+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c -@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw) - { - u32 head, tail; - -+ /* underlying hardware might not allow access and/or always return -+ * 0 for the head/tail registers so just use the cached values -+ */ - head = ring->next_to_clean; -- tail = readl(ring->tail); -+ tail = ring->next_to_use; - - if (head != tail) - return (head < tail) ? -@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, - #endif - struct sk_buff *skb; - -- if (!rx_buffer) -+ if (!rx_buffer || !size) - return NULL; - /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; -@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) - /* exit if we failed to retrieve a buffer */ - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; -- if (rx_buffer) -+ if (rx_buffer && size) - rx_buffer->pagecnt_bias++; - break; - } -diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c -index 1603e99bae4af..498797a0a0a95 100644 ---- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c -+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c -@@ -273,11 +273,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) - void iavf_configure_queues(struct iavf_adapter *adapter) - { - struct virtchnl_vsi_queue_config_info *vqci; -- struct virtchnl_queue_pair_info *vqpi; -+ int i, max_frame = adapter->vf_res->max_mtu; - int pairs = adapter->num_active_queues; -- int i, max_frame = IAVF_MAX_RXBUFFER; -+ struct virtchnl_queue_pair_info *vqpi; - size_t len; - -+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) -+ max_frame = IAVF_MAX_RXBUFFER; -+ - if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { - /* bail because we already have a command pending */ - dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n", -diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c -index 6c4e1d45235ef..1169fd7811b09 100644 ---- a/drivers/net/ethernet/intel/ice/ice_lib.c -+++ b/drivers/net/ethernet/intel/ice/ice_lib.c -@@ -911,7 +911,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) - */ - static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) - { -- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; -+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0; - u16 num_txq_per_tc, num_rxq_per_tc; - u16 qcount_tx = vsi->alloc_txq; - u16 qcount_rx = vsi->alloc_rxq; -@@ -978,23 +978,25 @@ static int ice_vsi_setup_q_map(struct 
ice_vsi *vsi, struct ice_vsi_ctx *ctxt) - * at least 1) - */ - if (offset) -- vsi->num_rxq = offset; -+ rx_count = offset; - else -- vsi->num_rxq = num_rxq_per_tc; -+ rx_count = num_rxq_per_tc; - -- if (vsi->num_rxq > vsi->alloc_rxq) { -+ if (rx_count > vsi->alloc_rxq) { - dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", -- vsi->num_rxq, vsi->alloc_rxq); -+ rx_count, vsi->alloc_rxq); - return -EINVAL; - } - -- vsi->num_txq = tx_count; -- if (vsi->num_txq > vsi->alloc_txq) { -+ if (tx_count > vsi->alloc_txq) { - dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", -- vsi->num_txq, vsi->alloc_txq); -+ tx_count, vsi->alloc_txq); - return -EINVAL; - } - -+ vsi->num_txq = tx_count; -+ vsi->num_rxq = rx_count; -+ - if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { - dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); - /* since there is a chance that num_rxq could have been changed -@@ -3487,6 +3489,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, - u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; - u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; - int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; -+ u16 new_txq, new_rxq; - u8 netdev_tc = 0; - int i; - -@@ -3527,21 +3530,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, - } - } - -- /* Set actual Tx/Rx queue pairs */ -- vsi->num_txq = offset + qcount_tx; -- if (vsi->num_txq > vsi->alloc_txq) { -+ new_txq = offset + qcount_tx; -+ if (new_txq > vsi->alloc_txq) { - dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", -- vsi->num_txq, vsi->alloc_txq); -+ new_txq, vsi->alloc_txq); - return -EINVAL; - } - -- vsi->num_rxq = offset + qcount_rx; -- if (vsi->num_rxq > vsi->alloc_rxq) { -+ new_rxq = offset + qcount_rx; -+ if (new_rxq > vsi->alloc_rxq) { - dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", -- vsi->num_rxq, vsi->alloc_rxq); -+ new_rxq, vsi->alloc_rxq); - return -EINVAL; - } - -+ /* Set actual Tx/Rx queue pairs */ -+ vsi->num_txq = new_txq; -+ vsi->num_rxq = new_rxq; -+ - /* Setup queue TC[0].qmap for given VSI context */ - ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); - ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); -@@ -3573,6 +3579,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) - { - u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - struct ice_pf *pf = vsi->back; -+ struct ice_tc_cfg old_tc_cfg; - struct ice_vsi_ctx *ctx; - struct device *dev; - int i, ret = 0; -@@ -3597,6 +3604,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) - max_txqs[i] = vsi->num_txq; - } - -+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); - vsi->tc_cfg.ena_tc = ena_tc; - vsi->tc_cfg.numtc = num_tc; - -@@ -3613,8 +3621,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) - else - ret = ice_vsi_setup_q_map(vsi, ctx); - -- if (ret) -+ if (ret) { -+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); - goto out; -+ } - - /* must to indicate which section of VSI context are being modified */ - ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); -diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c -index 4c6bb7482b362..48befe1e2872c 100644 ---- a/drivers/net/ethernet/intel/ice/ice_main.c -+++ b/drivers/net/ethernet/intel/ice/ice_main.c 
-@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) - return -EBUSY; - } - -- ice_unplug_aux_dev(pf); -- - switch (reset) { - case ICE_RESET_PFR: - set_bit(ICE_PFR_REQ, pf->state); -@@ -6629,7 +6627,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) - */ - int ice_down(struct ice_vsi *vsi) - { -- int i, tx_err, rx_err, link_err = 0, vlan_err = 0; -+ int i, tx_err, rx_err, vlan_err = 0; - - WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); - -@@ -6663,20 +6661,13 @@ int ice_down(struct ice_vsi *vsi) - - ice_napi_disable_all(vsi); - -- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { -- link_err = ice_force_phys_link_state(vsi, false); -- if (link_err) -- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", -- vsi->vsi_num, link_err); -- } -- - ice_for_each_txq(vsi, i) - ice_clean_tx_ring(vsi->tx_rings[i]); - - ice_for_each_rxq(vsi, i) - ice_clean_rx_ring(vsi->rx_rings[i]); - -- if (tx_err || rx_err || link_err || vlan_err) { -+ if (tx_err || rx_err || vlan_err) { - netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", - vsi->vsi_num, vsi->vsw->sw_id); - return -EIO; -@@ -6838,6 +6829,8 @@ int ice_vsi_open(struct ice_vsi *vsi) - if (err) - goto err_setup_rx; - -+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); -+ - if (vsi->type == ICE_VSI_PF) { - /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); -@@ -8876,6 +8869,16 @@ int ice_stop(struct net_device *netdev) - return -EBUSY; - } - -+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { -+ int link_err = ice_force_phys_link_state(vsi, false); -+ -+ if (link_err) { -+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", -+ vsi->vsi_num, link_err); -+ return -EIO; -+ } -+ } -+ - ice_vsi_close(vsi); - - return 0; -diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c -index 836dce8407124..97453d1dfafed 100644 ---- a/drivers/net/ethernet/intel/ice/ice_txrx.c -+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c -@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, - if (test_bit(ICE_VSI_DOWN, vsi->state)) - return -ENETDOWN; - -- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq) -+ if (!ice_is_xdp_ena_vsi(vsi)) - return -ENXIO; - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) -@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, - xdp_ring = vsi->xdp_rings[queue_index]; - spin_lock(&xdp_ring->tx_lock); - } else { -+ /* Generally, should not happen */ -+ if (unlikely(queue_index >= vsi->num_xdp_txq)) -+ return -ENXIO; - xdp_ring = vsi->xdp_rings[queue_index]; - } - -diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c -index 85155cd9405c5..4aeb927c37153 100644 ---- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c -+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c -@@ -179,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg) - /* Only return ad bits of the gw register */ - ret &= MLXBF_GIGE_MDIO_GW_AD_MASK; - -+ /* The MDIO lock is set on read. 
To release it, clear gw register */ -+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); -+ - return ret; - } - -@@ -203,6 +206,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add, - temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK), - 5, 1000000); - -+ /* The MDIO lock is set on read. To release it, clear gw register */ -+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET); -+ - return ret; - } - -diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c -index 49b85ca578b01..9820efce72ffe 100644 ---- a/drivers/net/ethernet/microsoft/mana/gdma_main.c -+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c -@@ -370,6 +370,11 @@ static void mana_gd_process_eq_events(void *arg) - break; - } - -+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before -+ * reading eqe. -+ */ -+ rmb(); -+ - mana_gd_process_eqe(eq); - - eq->head++; -@@ -1107,6 +1112,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) - if (WARN_ON_ONCE(owner_bits != new_bits)) - return -1; - -+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before -+ * reading completion info -+ */ -+ rmb(); -+ - comp->wq_num = cqe->cqe_info.wq_num; - comp->is_sq = cqe->cqe_info.is_sq; - memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE); -diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c -index b357ac4c56c59..7e32b04eb0c75 100644 ---- a/drivers/net/ethernet/renesas/ravb_main.c -+++ b/drivers/net/ethernet/renesas/ravb_main.c -@@ -1449,6 +1449,8 @@ static int ravb_phy_init(struct net_device *ndev) - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - } - -+ /* Indicate that the MAC is responsible for managing PHY PM */ -+ phydev->mac_managed_pm = true; - phy_attached_info(phydev); - - return 0; -diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c -index 67ade78fb7671..7fd8828d3a846 100644 ---- a/drivers/net/ethernet/renesas/sh_eth.c -+++ b/drivers/net/ethernet/renesas/sh_eth.c -@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev) - if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) - phy_set_max_speed(phydev, SPEED_100); - -+ /* Indicate that the MAC is responsible for managing PHY PM */ -+ phydev->mac_managed_pm = true; - phy_attached_info(phydev); - - return 0; -diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c -index 032b8c0bd7889..5b4d661ab9867 100644 ---- a/drivers/net/ethernet/sfc/efx_channels.c -+++ b/drivers/net/ethernet/sfc/efx_channels.c -@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx) - efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); - efx->n_rx_channels = 1; - efx->n_tx_channels = 1; -- efx->tx_channel_offset = 1; -+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0; - efx->n_xdp_channels = 0; - efx->xdp_channel_offset = efx->n_channels; - efx->legacy_irq = efx->pci_dev->irq; -diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c -index 017212a40df38..f54ebd0072868 100644 ---- a/drivers/net/ethernet/sfc/siena/efx_channels.c -+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c -@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) - efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 
1 : 0); - efx->n_rx_channels = 1; - efx->n_tx_channels = 1; -- efx->tx_channel_offset = 1; -+ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0; - efx->n_xdp_channels = 0; - efx->xdp_channel_offset = efx->n_channels; - efx->legacy_irq = efx->pci_dev->irq; -diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c -index e166dcb9b99ce..91e87594ed1ea 100644 ---- a/drivers/net/ethernet/sfc/siena/tx.c -+++ b/drivers/net/ethernet/sfc/siena/tx.c -@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb, - * previous packets out. - */ - if (!netdev_xmit_more()) -- efx_tx_send_pending(tx_queue->channel); -+ efx_tx_send_pending(efx_get_tx_channel(efx, index)); - return NETDEV_TX_OK; - } - -diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c -index 138bca6113415..80ed7f760bd30 100644 ---- a/drivers/net/ethernet/sfc/tx.c -+++ b/drivers/net/ethernet/sfc/tx.c -@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, - * previous packets out. - */ - if (!netdev_xmit_more()) -- efx_tx_send_pending(tx_queue->channel); -+ efx_tx_send_pending(efx_get_tx_channel(efx, index)); - return NETDEV_TX_OK; - } - -diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c -index 8594ee839628b..88aa0d310aeef 100644 ---- a/drivers/net/ethernet/sun/sunhme.c -+++ b/drivers/net/ethernet/sun/sunhme.c -@@ -2020,9 +2020,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) - - skb_reserve(copy_skb, 2); - skb_put(copy_skb, len); -- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); -+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); - skb_copy_from_linear_data(skb, copy_skb->data, len); -- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); -+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE); - /* Reuse original ring buffer. 
*/ - hme_write_rxd(hp, this, - (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), -diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c -index ec010cf2e816a..6f874f99b910c 100644 ---- a/drivers/net/ipa/ipa_qmi.c -+++ b/drivers/net/ipa/ipa_qmi.c -@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) - mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE); - req.v4_route_tbl_info_valid = 1; - req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset; -- req.v4_route_tbl_info.count = mem->size / sizeof(__le64); -+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; - - mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE); - req.v6_route_tbl_info_valid = 1; - req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset; -- req.v6_route_tbl_info.count = mem->size / sizeof(__le64); -+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; - - mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER); - req.v4_filter_tbl_start_valid = 1; -@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) - req.v4_hash_route_tbl_info_valid = 1; - req.v4_hash_route_tbl_info.start = - ipa->mem_offset + mem->offset; -- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64); -+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; - } - - mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED); -@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi) - req.v6_hash_route_tbl_info_valid = 1; - req.v6_hash_route_tbl_info.start = - ipa->mem_offset + mem->offset; -- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64); -+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1; - } - - mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED); -diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c -index 6838e8065072b..75d3fc0092e92 100644 ---- a/drivers/net/ipa/ipa_qmi_msg.c -+++ b/drivers/net/ipa/ipa_qmi_msg.c -@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { - .tlv_type = 0x12, - .offset = offsetof(struct ipa_init_modem_driver_req, - v4_route_tbl_info), -- .ei_array = ipa_mem_array_ei, -+ .ei_array = ipa_mem_bounds_ei, - }, - { - .data_type = QMI_OPT_FLAG, -@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { - .tlv_type = 0x13, - .offset = offsetof(struct ipa_init_modem_driver_req, - v6_route_tbl_info), -- .ei_array = ipa_mem_array_ei, -+ .ei_array = ipa_mem_bounds_ei, - }, - { - .data_type = QMI_OPT_FLAG, -@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { - .tlv_type = 0x1b, - .offset = offsetof(struct ipa_init_modem_driver_req, - v4_hash_route_tbl_info), -- .ei_array = ipa_mem_array_ei, -+ .ei_array = ipa_mem_bounds_ei, - }, - { - .data_type = QMI_OPT_FLAG, -@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { - .tlv_type = 0x1c, - .offset = offsetof(struct ipa_init_modem_driver_req, - v6_hash_route_tbl_info), -- .ei_array = ipa_mem_array_ei, -+ .ei_array = ipa_mem_bounds_ei, - }, - { - .data_type = QMI_OPT_FLAG, -diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h -index 495e85abe50bd..9651aa59b5968 100644 ---- a/drivers/net/ipa/ipa_qmi_msg.h -+++ b/drivers/net/ipa/ipa_qmi_msg.h -@@ -86,9 +86,11 @@ enum ipa_platform_type { - IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */ - }; - --/* This defines the start and end offset of a range of memory. Both -- * fields are offsets relative to the start of IPA shared memory. -- * The end value is the last addressable byte *within* the range. 
-+/* This defines the start and end offset of a range of memory. The start -+ * value is a byte offset relative to the start of IPA shared memory. The -+ * end value is the last addressable unit *within* the range. Typically -+ * the end value is in units of bytes, however it can also be a maximum -+ * array index value. - */ - struct ipa_mem_bounds { - u32 start; -@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req { - u8 hdr_tbl_info_valid; - struct ipa_mem_bounds hdr_tbl_info; - -- /* Routing table information. These define the location and size of -- * non-hashable IPv4 and IPv6 filter tables. The start values are -- * offsets relative to the start of IPA shared memory. -+ /* Routing table information. These define the location and maximum -+ * *index* (not byte) for the modem portion of non-hashable IPv4 and -+ * IPv6 routing tables. The start values are byte offsets relative -+ * to the start of IPA shared memory. - */ - u8 v4_route_tbl_info_valid; -- struct ipa_mem_array v4_route_tbl_info; -+ struct ipa_mem_bounds v4_route_tbl_info; - u8 v6_route_tbl_info_valid; -- struct ipa_mem_array v6_route_tbl_info; -+ struct ipa_mem_bounds v6_route_tbl_info; - - /* Filter table information. These define the location of the - * non-hashable IPv4 and IPv6 filter tables. The start values are -- * offsets relative to the start of IPA shared memory. -+ * byte offsets relative to the start of IPA shared memory. - */ - u8 v4_filter_tbl_start_valid; - u32 v4_filter_tbl_start; -@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req { - u8 zip_tbl_info_valid; - struct ipa_mem_bounds zip_tbl_info; - -- /* Routing table information. These define the location and size -- * of hashable IPv4 and IPv6 filter tables. The start values are -- * offsets relative to the start of IPA shared memory. -+ /* Routing table information. These define the location and maximum -+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6 -+ * routing tables (if supported by hardware). The start values are -+ * byte offsets relative to the start of IPA shared memory. - */ - u8 v4_hash_route_tbl_info_valid; -- struct ipa_mem_array v4_hash_route_tbl_info; -+ struct ipa_mem_bounds v4_hash_route_tbl_info; - u8 v6_hash_route_tbl_info_valid; -- struct ipa_mem_array v6_hash_route_tbl_info; -+ struct ipa_mem_bounds v6_hash_route_tbl_info; - - /* Filter table information. These define the location and size -- * of hashable IPv4 and IPv6 filter tables. The start values are -- * offsets relative to the start of IPA shared memory. -+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware). -+ * The start values are byte offsets relative to the start of IPA -+ * shared memory. 
- */ - u8 v4_hash_filter_tbl_start_valid; - u32 v4_hash_filter_tbl_start; -diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c -index 2f5a58bfc529a..69efe672ca528 100644 ---- a/drivers/net/ipa/ipa_table.c -+++ b/drivers/net/ipa/ipa_table.c -@@ -108,8 +108,6 @@ - - /* Assignment of route table entries to the modem and AP */ - #define IPA_ROUTE_MODEM_MIN 0 --#define IPA_ROUTE_MODEM_COUNT 8 -- - #define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT - #define IPA_ROUTE_AP_COUNT \ - (IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT) -diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h -index b6a9a0d79d68e..1538e2e1732fe 100644 ---- a/drivers/net/ipa/ipa_table.h -+++ b/drivers/net/ipa/ipa_table.h -@@ -13,6 +13,9 @@ struct ipa; - /* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */ - #define IPA_FILTER_COUNT_MAX 14 - -+/* The number of route table entries allotted to the modem */ -+#define IPA_ROUTE_MODEM_COUNT 8 -+ - /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */ - #define IPA_ROUTE_COUNT_MAX 15 - -diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c -index 6ffb27419e64b..c58123e136896 100644 ---- a/drivers/net/ipvlan/ipvlan_core.c -+++ b/drivers/net/ipvlan/ipvlan_core.c -@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) - - static int ipvlan_process_outbound(struct sk_buff *skb) - { -- struct ethhdr *ethh = eth_hdr(skb); - int ret = NET_XMIT_DROP; - - /* The ipvlan is a pseudo-L2 device, so the packets that we receive -@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb) - if (skb_mac_header_was_set(skb)) { - /* In this mode we dont care about - * multicast and broadcast traffic */ -+ struct ethhdr *ethh = eth_hdr(skb); -+ - if (is_multicast_ether_addr(ethh->h_dest)) { - pr_debug_ratelimited( - "Dropped {multi|broad}cast of type=[%x]\n", -@@ -589,7 +590,7 @@ out: - static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) - { - const struct ipvl_dev *ipvlan = netdev_priv(dev); -- struct ethhdr *eth = eth_hdr(skb); -+ struct ethhdr *eth = skb_eth_hdr(skb); - struct ipvl_addr *addr; - void *lyr3h; - int addr_type; -@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) - return dev_forward_skb(ipvlan->phy_dev, skb); - - } else if (is_multicast_ether_addr(eth->h_dest)) { -+ skb_reset_mac_header(skb); - ipvlan_skb_crossing_ns(skb, NULL); - ipvlan_multicast_enqueue(ipvlan->port, skb, true); - return NET_XMIT_SUCCESS; -diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c -index 9e3c815a070f1..796e9c7857d09 100644 ---- a/drivers/net/mdio/of_mdio.c -+++ b/drivers/net/mdio/of_mdio.c -@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) - return 0; - - unregister: -+ of_node_put(child); - mdiobus_unregister(mdio); - return rc; - } -diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c -index 605a38e16db05..0e58aa7f0374e 100644 ---- a/drivers/net/netdevsim/hwstats.c -+++ b/drivers/net/netdevsim/hwstats.c -@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev) - goto err_remove_hwstats_recursive; - } - -- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats, -+ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats, - &nsim_dev_hwstats_l3_enable_fops.fops); -- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats, -+ 
debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats, - &nsim_dev_hwstats_l3_disable_fops.fops); -- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats, -+ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats, - &nsim_dev_hwstats_l3_fail_fops.fops); - - INIT_DELAYED_WORK(&hwstats->traffic_dw, -diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c -index c7047f5d7a9b0..8bc0957a0f6d3 100644 ---- a/drivers/net/phy/aquantia_main.c -+++ b/drivers/net/phy/aquantia_main.c -@@ -90,6 +90,9 @@ - #define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8) - #define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0) - -+#define VEND1_GLOBAL_GEN_STAT2 0xc831 -+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15) -+ - #define VEND1_GLOBAL_RSVD_STAT1 0xc885 - #define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4) - #define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0) -@@ -124,6 +127,12 @@ - #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) - #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) - -+/* Sleep and timeout for checking if the Processor-Intensive -+ * MDIO operation is finished -+ */ -+#define AQR107_OP_IN_PROG_SLEEP 1000 -+#define AQR107_OP_IN_PROG_TIMEOUT 100000 -+ - struct aqr107_hw_stat { - const char *name; - int reg; -@@ -596,16 +605,52 @@ static void aqr107_link_change_notify(struct phy_device *phydev) - phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n"); - } - -+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev) -+{ -+ int val, err; -+ -+ /* The datasheet notes to wait at least 1ms after issuing a -+ * processor intensive operation before checking. -+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout -+ * because that just determines the maximum time slept, not the minimum. 
-+ */ -+ usleep_range(1000, 5000); -+ -+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, -+ VEND1_GLOBAL_GEN_STAT2, val, -+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG), -+ AQR107_OP_IN_PROG_SLEEP, -+ AQR107_OP_IN_PROG_TIMEOUT, false); -+ if (err) { -+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ - static int aqr107_suspend(struct phy_device *phydev) - { -- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, -- MDIO_CTRL1_LPOWER); -+ int err; -+ -+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, -+ MDIO_CTRL1_LPOWER); -+ if (err) -+ return err; -+ -+ return aqr107_wait_processor_intensive_op(phydev); - } - - static int aqr107_resume(struct phy_device *phydev) - { -- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, -- MDIO_CTRL1_LPOWER); -+ int err; -+ -+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1, -+ MDIO_CTRL1_LPOWER); -+ if (err) -+ return err; -+ -+ return aqr107_wait_processor_intensive_op(phydev); - } - - static int aqr107_probe(struct phy_device *phydev) -diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c -index 34483a4bd688a..e8e1101911b2f 100644 ---- a/drivers/net/phy/micrel.c -+++ b/drivers/net/phy/micrel.c -@@ -2662,16 +2662,19 @@ static int lan8804_config_init(struct phy_device *phydev) - static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) - { - int irq_status, tsu_irq_status; -+ int ret = IRQ_NONE; - - irq_status = phy_read(phydev, LAN8814_INTS); -- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK)) -- phy_trigger_machine(phydev); -- - if (irq_status < 0) { - phy_error(phydev); - return IRQ_NONE; - } - -+ if (irq_status & LAN8814_INT_LINK) { -+ phy_trigger_machine(phydev); -+ ret = IRQ_HANDLED; -+ } -+ - while (1) { - tsu_irq_status = lanphy_read_page_reg(phydev, 4, - LAN8814_INTR_STS_REG); -@@ -2680,12 +2683,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) - (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ | - LAN8814_INTR_STS_REG_1588_TSU1_ | - LAN8814_INTR_STS_REG_1588_TSU2_ | -- LAN8814_INTR_STS_REG_1588_TSU3_))) -+ LAN8814_INTR_STS_REG_1588_TSU3_))) { - lan8814_handle_ptp_interrupt(phydev); -- else -+ ret = IRQ_HANDLED; -+ } else { - break; -+ } - } -- return IRQ_HANDLED; -+ -+ return ret; - } - - static int lan8814_ack_interrupt(struct phy_device *phydev) -diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c -index b07dde6f0abf2..b9899913d2467 100644 ---- a/drivers/net/team/team.c -+++ b/drivers/net/team/team.c -@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev, - } - } - -- netif_addr_lock_bh(dev); -- dev_uc_sync_multiple(port_dev, dev); -- dev_mc_sync_multiple(port_dev, dev); -- netif_addr_unlock_bh(dev); -+ if (dev->flags & IFF_UP) { -+ netif_addr_lock_bh(dev); -+ dev_uc_sync_multiple(port_dev, dev); -+ dev_mc_sync_multiple(port_dev, dev); -+ netif_addr_unlock_bh(dev); -+ } - - port->index = -1; - list_add_tail_rcu(&port->list, &team->port_list); -@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev) - netdev_rx_handler_unregister(port_dev); - team_port_disable_netpoll(port); - vlan_vids_del_by_dev(port_dev, dev); -- dev_uc_unsync(port_dev, dev); -- dev_mc_unsync(port_dev, dev); -+ if (dev->flags & IFF_UP) { -+ dev_uc_unsync(port_dev, dev); -+ dev_mc_unsync(port_dev, dev); -+ } - dev_close(port_dev); - team_port_leave(team, port); - -@@ -1700,6 +1704,14 @@ static int team_open(struct 
net_device *dev) - - static int team_close(struct net_device *dev) - { -+ struct team *team = netdev_priv(dev); -+ struct team_port *port; -+ -+ list_for_each_entry(port, &team->port_list, list) { -+ dev_uc_unsync(port->dev, dev); -+ dev_mc_unsync(port->dev, dev); -+ } -+ - return 0; - } - -diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c -index d0f3b6d7f4089..5c804bcabfe6b 100644 ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs) - if (attrs[WGPEER_A_ENDPOINT]) { - struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); - size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); -+ struct endpoint endpoint = { { { 0 } } }; - -- if ((len == sizeof(struct sockaddr_in) && -- addr->sa_family == AF_INET) || -- (len == sizeof(struct sockaddr_in6) && -- addr->sa_family == AF_INET6)) { -- struct endpoint endpoint = { { { 0 } } }; -- -- memcpy(&endpoint.addr, addr, len); -+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { -+ endpoint.addr4 = *(struct sockaddr_in *)addr; -+ wg_socket_set_peer_endpoint(peer, &endpoint); -+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { -+ endpoint.addr6 = *(struct sockaddr_in6 *)addr; - wg_socket_set_peer_endpoint(peer, &endpoint); - } - } -diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c -index ba87d294604fe..d4bb40a695ab6 100644 ---- a/drivers/net/wireguard/selftest/ratelimiter.c -+++ b/drivers/net/wireguard/selftest/ratelimiter.c -@@ -6,29 +6,28 @@ - #ifdef DEBUG - - #include --#include - - static const struct { - bool result; -- u64 nsec_to_sleep_before; -+ unsigned int msec_to_sleep_before; - } expected_results[] __initconst = { - [0 ... 
PACKETS_BURSTABLE - 1] = { true, 0 }, - [PACKETS_BURSTABLE] = { false, 0 }, -- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND }, -+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, - [PACKETS_BURSTABLE + 2] = { false, 0 }, -- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, -+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, - [PACKETS_BURSTABLE + 4] = { true, 0 }, - [PACKETS_BURSTABLE + 5] = { false, 0 } - }; - - static __init unsigned int maximum_jiffies_at_index(int index) - { -- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3; -+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; - int i; - - for (i = 0; i <= index; ++i) -- total_nsecs += expected_results[i].nsec_to_sleep_before; -- return nsecs_to_jiffies(total_nsecs); -+ total_msecs += expected_results[i].msec_to_sleep_before; -+ return msecs_to_jiffies(total_msecs); - } - - static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, -@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, - loop_start_time = jiffies; - - for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { -- if (expected_results[i].nsec_to_sleep_before) { -- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3), -- ns_to_ktime(expected_results[i].nsec_to_sleep_before)); -- set_current_state(TASK_UNINTERRUPTIBLE); -- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME); -- } -+ if (expected_results[i].msec_to_sleep_before) -+ msleep(expected_results[i].msec_to_sleep_before); - - if (time_is_before_jiffies(loop_start_time + - maximum_jiffies_at_index(i))) -@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void) - if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) - return true; - -- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0); -+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); - - if (wg_ratelimiter_init()) - goto out; -@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void) - ++test; - #endif - -- for (trials = TRIALS_BEFORE_GIVING_UP;;) { -+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) { - int test_count = 0, ret; - - ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); -diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig -index a647a406b87be..b20409f8c13ab 100644 ---- a/drivers/net/wireless/intel/iwlwifi/Kconfig -+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig -@@ -140,6 +140,7 @@ config IWLMEI - depends on INTEL_MEI - depends on PM - depends on CFG80211 -+ depends on BROKEN - help - Enables the iwlmei kernel module. 
- -diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c -index 9e832b27170fe..a4eb025f504f3 100644 ---- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c -+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c -@@ -1138,7 +1138,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) - offset %= 32; - - val = mt76_rr(dev, addr); -- val >>= (tid % 32); -+ val >>= offset; - - if (offset > 20) { - addr += 4; -diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c -index 629d10fcf53b2..b9f1a8e9f88cb 100644 ---- a/drivers/nvdimm/pmem.c -+++ b/drivers/nvdimm/pmem.c -@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem) - return to_nd_region(to_dev(pmem)->parent); - } - --static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset) -+static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset) - { - return pmem->phys_addr + offset; - } -@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) - static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset, - unsigned int len) - { -- phys_addr_t phys = to_phys(pmem, offset); -+ phys_addr_t phys = pmem_to_phys(pmem, offset); - unsigned long pfn_start, pfn_end, pfn; - - /* only pmem in the linear map supports HWPoison */ -@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) - static long __pmem_clear_poison(struct pmem_device *pmem, - phys_addr_t offset, unsigned int len) - { -- phys_addr_t phys = to_phys(pmem, offset); -+ phys_addr_t phys = pmem_to_phys(pmem, offset); - long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len); - - if (cleared > 0) { -diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c -index d702d7d60235d..2d23b7d41f7e6 100644 ---- a/drivers/nvme/host/apple.c -+++ b/drivers/nvme/host/apple.c -@@ -1502,7 +1502,7 @@ static int apple_nvme_probe(struct platform_device *pdev) - - if (!blk_get_queue(anv->ctrl.admin_q)) { - nvme_start_admin_queue(&anv->ctrl); -- blk_cleanup_queue(anv->ctrl.admin_q); -+ blk_mq_destroy_queue(anv->ctrl.admin_q); - anv->ctrl.admin_q = NULL; - ret = -ENODEV; - goto put_dev; -diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c -index 2f965356f3453..6d76fc608b741 100644 ---- a/drivers/nvme/host/core.c -+++ b/drivers/nvme/host/core.c -@@ -4105,7 +4105,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) - if (!nvme_ns_head_multipath(ns->head)) - nvme_cdev_del(&ns->cdev, &ns->cdev_device); - del_gendisk(ns->disk); -- blk_cleanup_queue(ns->queue); - - down_write(&ns->ctrl->namespaces_rwsem); - list_del_init(&ns->list); -diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c -index 4aff83b1b0c05..9a5ce70d7f215 100644 ---- a/drivers/nvme/host/fc.c -+++ b/drivers/nvme/host/fc.c -@@ -2392,7 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref) - unsigned long flags; - - if (ctrl->ctrl.tagset) { -- blk_cleanup_queue(ctrl->ctrl.connect_q); -+ blk_mq_destroy_queue(ctrl->ctrl.connect_q); - blk_mq_free_tag_set(&ctrl->tag_set); - } - -@@ -2402,8 +2402,8 @@ nvme_fc_ctrl_free(struct kref *ref) - spin_unlock_irqrestore(&ctrl->rport->lock, flags); - - nvme_start_admin_queue(&ctrl->ctrl); -- blk_cleanup_queue(ctrl->ctrl.admin_q); -- blk_cleanup_queue(ctrl->ctrl.fabrics_q); -+ blk_mq_destroy_queue(ctrl->ctrl.admin_q); -+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); - blk_mq_free_tag_set(&ctrl->admin_tag_set); - - kfree(ctrl->queues); -@@ -2953,7 +2953,7 @@ 
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) - out_delete_hw_queues: - nvme_fc_delete_hw_io_queues(ctrl); - out_cleanup_blk_queue: -- blk_cleanup_queue(ctrl->ctrl.connect_q); -+ blk_mq_destroy_queue(ctrl->ctrl.connect_q); - out_free_tag_set: - blk_mq_free_tag_set(&ctrl->tag_set); - nvme_fc_free_io_queues(ctrl); -@@ -3642,9 +3642,9 @@ fail_ctrl: - return ERR_PTR(-EIO); - - out_cleanup_admin_q: -- blk_cleanup_queue(ctrl->ctrl.admin_q); -+ blk_mq_destroy_queue(ctrl->ctrl.admin_q); - out_cleanup_fabrics_q: -- blk_cleanup_queue(ctrl->ctrl.fabrics_q); -+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); - out_free_admin_tag_set: - blk_mq_free_tag_set(&ctrl->admin_tag_set); - out_free_queues: -diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c -index 9f6614f7dbeb1..3516678d37541 100644 ---- a/drivers/nvme/host/pci.c -+++ b/drivers/nvme/host/pci.c -@@ -1760,7 +1760,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev) - * queue to flush these to completion. - */ - nvme_start_admin_queue(&dev->ctrl); -- blk_cleanup_queue(dev->ctrl.admin_q); -+ blk_mq_destroy_queue(dev->ctrl.admin_q); - blk_mq_free_tag_set(&dev->admin_tagset); - } - } -diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c -index 46c2dcf72f7ea..240024dd5d857 100644 ---- a/drivers/nvme/host/rdma.c -+++ b/drivers/nvme/host/rdma.c -@@ -840,8 +840,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, - bool remove) - { - if (remove) { -- blk_cleanup_queue(ctrl->ctrl.admin_q); -- blk_cleanup_queue(ctrl->ctrl.fabrics_q); -+ blk_mq_destroy_queue(ctrl->ctrl.admin_q); -+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); - blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); - } - if (ctrl->async_event_sqe.data) { -@@ -935,10 +935,10 @@ out_stop_queue: - nvme_cancel_admin_tagset(&ctrl->ctrl); - out_cleanup_queue: - if (new) -- blk_cleanup_queue(ctrl->ctrl.admin_q); -+ blk_mq_destroy_queue(ctrl->ctrl.admin_q); - out_cleanup_fabrics_q: - if (new) -- blk_cleanup_queue(ctrl->ctrl.fabrics_q); -+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); - out_free_tagset: - if (new) - blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); -@@ -957,7 +957,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, - bool remove) - { - if (remove) { -- blk_cleanup_queue(ctrl->ctrl.connect_q); -+ blk_mq_destroy_queue(ctrl->ctrl.connect_q); - blk_mq_free_tag_set(ctrl->ctrl.tagset); - } - nvme_rdma_free_io_queues(ctrl); -@@ -1012,7 +1012,7 @@ out_wait_freeze_timed_out: - out_cleanup_connect_q: - nvme_cancel_tagset(&ctrl->ctrl); - if (new) -- blk_cleanup_queue(ctrl->ctrl.connect_q); -+ blk_mq_destroy_queue(ctrl->ctrl.connect_q); - out_free_tag_set: - if (new) - blk_mq_free_tag_set(ctrl->ctrl.tagset); -diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c -index daa0e160e1212..d7e5bbdb9b75a 100644 ---- a/drivers/nvme/host/tcp.c -+++ b/drivers/nvme/host/tcp.c -@@ -1881,7 +1881,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) - { - nvme_tcp_stop_io_queues(ctrl); - if (remove) { -- blk_cleanup_queue(ctrl->connect_q); -+ blk_mq_destroy_queue(ctrl->connect_q); - blk_mq_free_tag_set(ctrl->tagset); - } - nvme_tcp_free_io_queues(ctrl); -@@ -1936,7 +1936,7 @@ out_wait_freeze_timed_out: - out_cleanup_connect_q: - nvme_cancel_tagset(ctrl); - if (new) -- blk_cleanup_queue(ctrl->connect_q); -+ blk_mq_destroy_queue(ctrl->connect_q); - out_free_tag_set: - if (new) - blk_mq_free_tag_set(ctrl->tagset); -@@ -1949,8 +1949,8 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) 
- { - nvme_tcp_stop_queue(ctrl, 0); - if (remove) { -- blk_cleanup_queue(ctrl->admin_q); -- blk_cleanup_queue(ctrl->fabrics_q); -+ blk_mq_destroy_queue(ctrl->admin_q); -+ blk_mq_destroy_queue(ctrl->fabrics_q); - blk_mq_free_tag_set(ctrl->admin_tagset); - } - nvme_tcp_free_admin_queue(ctrl); -@@ -2008,10 +2008,10 @@ out_stop_queue: - nvme_cancel_admin_tagset(ctrl); - out_cleanup_queue: - if (new) -- blk_cleanup_queue(ctrl->admin_q); -+ blk_mq_destroy_queue(ctrl->admin_q); - out_cleanup_fabrics_q: - if (new) -- blk_cleanup_queue(ctrl->fabrics_q); -+ blk_mq_destroy_queue(ctrl->fabrics_q); - out_free_tagset: - if (new) - blk_mq_free_tag_set(ctrl->admin_tagset); -diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c -index 59024af2da2e3..0f5c77e22a0a9 100644 ---- a/drivers/nvme/target/loop.c -+++ b/drivers/nvme/target/loop.c -@@ -266,8 +266,8 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) - if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) - return; - nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); -- blk_cleanup_queue(ctrl->ctrl.admin_q); -- blk_cleanup_queue(ctrl->ctrl.fabrics_q); -+ blk_mq_destroy_queue(ctrl->ctrl.admin_q); -+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); - blk_mq_free_tag_set(&ctrl->admin_tag_set); - } - -@@ -283,7 +283,7 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) - mutex_unlock(&nvme_loop_ctrl_mutex); - - if (nctrl->tagset) { -- blk_cleanup_queue(ctrl->ctrl.connect_q); -+ blk_mq_destroy_queue(ctrl->ctrl.connect_q); - blk_mq_free_tag_set(&ctrl->tag_set); - } - kfree(ctrl->queues); -@@ -410,9 +410,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) - - out_cleanup_queue: - clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); -- blk_cleanup_queue(ctrl->ctrl.admin_q); -+ blk_mq_destroy_queue(ctrl->ctrl.admin_q); - out_cleanup_fabrics_q: -- blk_cleanup_queue(ctrl->ctrl.fabrics_q); -+ blk_mq_destroy_queue(ctrl->ctrl.fabrics_q); - out_free_tagset: - blk_mq_free_tag_set(&ctrl->admin_tag_set); - out_free_sq: -@@ -554,7 +554,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) - return 0; - - out_cleanup_connect_q: -- blk_cleanup_queue(ctrl->ctrl.connect_q); -+ blk_mq_destroy_queue(ctrl->ctrl.connect_q); - out_free_tagset: - blk_mq_free_tag_set(&ctrl->tag_set); - out_destroy_queues: -diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c -index 80d8309652a4d..b80a9b74662b1 100644 ---- a/drivers/perf/arm-cmn.c -+++ b/drivers/perf/arm-cmn.c -@@ -36,7 +36,7 @@ - #define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) - #define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) - --#define CMN_CHILD_NODE_ADDR GENMASK(27, 0) -+#define CMN_CHILD_NODE_ADDR GENMASK(29, 0) - #define CMN_CHILD_NODE_EXTERNAL BIT(31) - - #define CMN_MAX_DIMENSION 12 -diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -index a4d7d9bd100d3..67712c77d806f 100644 ---- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c -@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane { - int submode; - bool invert_tx; - bool invert_rx; -- bool needs_reset; - }; - - struct gbe_phy_init_data_fix { -@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane) - 0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT); - } - --static int mvebu_a3700_comphy_reset(struct phy *phy) -+static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane) - { -- struct mvebu_a3700_comphy_lane *lane = 
phy_get_drvdata(phy); -- u16 mask, data; -- -- dev_dbg(lane->dev, "resetting lane %d\n", lane->id); -- -- /* COMPHY reset for internal logic */ -- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, -- SFT_RST_NO_REG, SFT_RST_NO_REG); -- -- /* COMPHY register reset (cleared automatically) */ -- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST); -- -- /* PIPE soft and register reset */ -- data = PIPE_SOFT_RESET | PIPE_REG_RESET; -- mask = data; -- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask); -- -- /* Release PIPE register reset */ -- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, -- 0x0, PIPE_REG_RESET); -- -- /* Reset SB configuration register (only for lanes 0 and 1) */ -- if (lane->id == 0 || lane->id == 1) { -- u32 mask, data; -- -- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT | -- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT; -- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT; -- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask); -- } -- -- return 0; -+ /* -+ * The USB3 MAC sets the USB3 PHY to low state, so we do not -+ * need to power off USB3 PHY again. -+ */ - } - - static bool mvebu_a3700_comphy_check_mode(int lane, -@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, - (lane->mode != mode || lane->submode != submode)) - return -EBUSY; - -- /* If changing mode, ensure reset is called */ -- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode) -- lane->needs_reset = true; -- - /* Just remember the mode, ->power_on() will do the real setup */ - lane->mode = mode; - lane->submode = submode; -@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, - static int mvebu_a3700_comphy_power_on(struct phy *phy) - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); -- int ret; - - if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode, - lane->submode)) { -@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy) - return -EINVAL; - } - -- if (lane->needs_reset) { -- ret = mvebu_a3700_comphy_reset(phy); -- if (ret) -- return ret; -- -- lane->needs_reset = false; -- } -- - switch (lane->mode) { - case PHY_MODE_USB_HOST_SS: - dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); -@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy) - { - struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); - -- switch (lane->mode) { -- case PHY_MODE_USB_HOST_SS: -- /* -- * The USB3 MAC sets the USB3 PHY to low state, so we do not -- * need to power off USB3 PHY again. 
-- */ -- break; -- -- case PHY_MODE_SATA: -- mvebu_a3700_comphy_sata_power_off(lane); -- break; -- -- case PHY_MODE_ETHERNET: -+ switch (lane->id) { -+ case 0: -+ mvebu_a3700_comphy_usb3_power_off(lane); - mvebu_a3700_comphy_ethernet_power_off(lane); -- break; -- -- case PHY_MODE_PCIE: -+ return 0; -+ case 1: - mvebu_a3700_comphy_pcie_power_off(lane); -- break; -- -+ mvebu_a3700_comphy_ethernet_power_off(lane); -+ return 0; -+ case 2: -+ mvebu_a3700_comphy_usb3_power_off(lane); -+ mvebu_a3700_comphy_sata_power_off(lane); -+ return 0; - default: - dev_err(lane->dev, "invalid COMPHY mode\n"); - return -EINVAL; - } -- -- return 0; - } - - static const struct phy_ops mvebu_a3700_comphy_ops = { - .power_on = mvebu_a3700_comphy_power_on, - .power_off = mvebu_a3700_comphy_power_off, -- .reset = mvebu_a3700_comphy_reset, - .set_mode = mvebu_a3700_comphy_set_mode, - .owner = THIS_MODULE, - }; -@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev) - * To avoid relying on the bootloader/firmware configuration, - * power off all comphys. - */ -- mvebu_a3700_comphy_reset(phy); -- lane->needs_reset = false; -+ mvebu_a3700_comphy_power_off(phy); - } - - provider = devm_of_phy_provider_register(&pdev->dev, -diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c -index ba6d787896606..e8489331f12b8 100644 ---- a/drivers/s390/block/dasd.c -+++ b/drivers/s390/block/dasd.c -@@ -3280,7 +3280,7 @@ static int dasd_alloc_queue(struct dasd_block *block) - static void dasd_free_queue(struct dasd_block *block) - { - if (block->request_queue) { -- blk_cleanup_queue(block->request_queue); -+ blk_mq_destroy_queue(block->request_queue); - blk_mq_free_tag_set(&block->tag_set); - block->request_queue = NULL; - } -diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c -index dc78a523a69f2..b6b938aa66158 100644 ---- a/drivers/s390/block/dasd_alias.c -+++ b/drivers/s390/block/dasd_alias.c -@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device) - struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) - { - struct dasd_eckd_private *alias_priv, *private = base_device->private; -- struct alias_pav_group *group = private->pavgroup; - struct alias_lcu *lcu = private->lcu; - struct dasd_device *alias_device; -+ struct alias_pav_group *group; - unsigned long flags; - -- if (!group || !lcu) -+ if (!lcu) - return NULL; - if (lcu->pav == NO_PAV || - lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING)) -@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device) - } - - spin_lock_irqsave(&lcu->lock, flags); -+ group = private->pavgroup; -+ if (!group) { -+ spin_unlock_irqrestore(&lcu->lock, flags); -+ return NULL; -+ } - alias_device = group->next; - if (!alias_device) { - if (list_empty(&group->aliaslist)) { -diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c -index a7a33ebf4bbe9..5a83f0a39901b 100644 ---- a/drivers/s390/block/dasd_genhd.c -+++ b/drivers/s390/block/dasd_genhd.c -@@ -41,8 +41,8 @@ int dasd_gendisk_alloc(struct dasd_block *block) - if (base->devindex >= DASD_PER_MAJOR) - return -EBUSY; - -- gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE, -- &dasd_bio_compl_lkclass); -+ gdp = blk_mq_alloc_disk_for_queue(block->request_queue, -+ &dasd_bio_compl_lkclass); - if (!gdp) - return -ENOMEM; - -diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c -index 8352f90d997df..ae9a107c520d0 100644 ---- a/drivers/scsi/hosts.c -+++ 
b/drivers/scsi/hosts.c -@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost) - mutex_unlock(&shost->scan_mutex); - scsi_proc_host_rm(shost); - -+ /* -+ * New SCSI devices cannot be attached anymore because of the SCSI host -+ * state so drop the tag set refcnt. Wait until the tag set refcnt drops -+ * to zero because .exit_cmd_priv implementations may need the host -+ * pointer. -+ */ -+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags); -+ wait_for_completion(&shost->tagset_freed); -+ - spin_lock_irqsave(shost->host_lock, flags); - if (scsi_host_set_state(shost, SHOST_DEL)) - BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY)); -@@ -240,6 +249,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, - if (error) - goto fail; - -+ kref_init(&shost->tagset_refcnt); -+ init_completion(&shost->tagset_freed); -+ - /* - * Increase usage count temporarily here so that calling - * scsi_autopm_put_host() will trigger runtime idle if there is -@@ -312,6 +324,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, - pm_runtime_disable(&shost->shost_gendev); - pm_runtime_set_suspended(&shost->shost_gendev); - pm_runtime_put_noidle(&shost->shost_gendev); -+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags); - fail: - return error; - } -@@ -345,9 +358,6 @@ static void scsi_host_dev_release(struct device *dev) - kfree(dev_name(&shost->shost_dev)); - } - -- if (shost->tag_set.tags) -- scsi_mq_destroy_tags(shost); -- - kfree(shost->shost_data); - - ida_simple_remove(&host_index_ida, shost->host_no); -diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c -index 9a1ae52bb621d..a6d3471a61057 100644 ---- a/drivers/scsi/mpt3sas/mpt3sas_base.c -+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c -@@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) - - if (ioc->is_mcpu_endpoint || - sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma || -- dma_get_required_mask(&pdev->dev) <= 32) -+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) - ioc->dma_mask = 32; - /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ - else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) -diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c -index 62666df1a59eb..4acff4e84b909 100644 ---- a/drivers/scsi/qla2xxx/qla_target.c -+++ b/drivers/scsi/qla2xxx/qla_target.c -@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, - - abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, - le32_to_cpu(abts->exchange_addr_to_abort)); -- if (!abort_cmd) -+ if (!abort_cmd) { -+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); - return -EIO; -+ } - mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun; - - if (abort_cmd->qpair) { -diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c -index f5c876d03c1ad..7e990f7a9f164 100644 ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -168,7 +168,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) - * Requeue this command. It will go before all other commands - * that are already in the queue. Schedule requeue work under - * lock such that the kblockd_schedule_work() call happens -- * before blk_cleanup_queue() finishes. -+ * before blk_mq_destroy_queue() finishes. - */ - cmd->result = 0; - -@@ -429,9 +429,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost) - * it and the queue. 
Mitigate by taking a reference to the - * queue and never touching the sdev again after we drop the - * host lock. Note: if __scsi_remove_device() invokes -- * blk_cleanup_queue() before the queue is run from this -+ * blk_mq_destroy_queue() before the queue is run from this - * function then blk_run_queue() will return immediately since -- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. -+ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING. - */ - slq = sdev->request_queue; - if (!blk_get_queue(slq)) -@@ -1995,9 +1995,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) - return blk_mq_alloc_tag_set(tag_set); - } - --void scsi_mq_destroy_tags(struct Scsi_Host *shost) -+void scsi_mq_free_tags(struct kref *kref) - { -+ struct Scsi_Host *shost = container_of(kref, typeof(*shost), -+ tagset_refcnt); -+ - blk_mq_free_tag_set(&shost->tag_set); -+ complete(&shost->tagset_freed); - } - - /** -diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h -index 5c4786310a31d..a0ee31d55f5f1 100644 ---- a/drivers/scsi/scsi_priv.h -+++ b/drivers/scsi/scsi_priv.h -@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost); - extern void scsi_requeue_run_queue(struct work_struct *work); - extern void scsi_start_queue(struct scsi_device *sdev); - extern int scsi_mq_setup_tags(struct Scsi_Host *shost); --extern void scsi_mq_destroy_tags(struct Scsi_Host *shost); -+extern void scsi_mq_free_tags(struct kref *kref); - extern void scsi_exit_queue(void); - extern void scsi_evt_thread(struct work_struct *work); - -diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c -index 91ac901a66826..5d27f5196de6f 100644 ---- a/drivers/scsi/scsi_scan.c -+++ b/drivers/scsi/scsi_scan.c -@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, - kfree(sdev); - goto out; - } -+ kref_get(&sdev->host->tagset_refcnt); - sdev->request_queue = q; - q->queuedata = sdev; - __scsi_init_queue(sdev->host, q); -diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c -index 43949798a2e47..5d61f58399dca 100644 ---- a/drivers/scsi/scsi_sysfs.c -+++ b/drivers/scsi/scsi_sysfs.c -@@ -1475,7 +1475,8 @@ void __scsi_remove_device(struct scsi_device *sdev) - scsi_device_set_state(sdev, SDEV_DEL); - mutex_unlock(&sdev->state_mutex); - -- blk_cleanup_queue(sdev->request_queue); -+ blk_mq_destroy_queue(sdev->request_queue); -+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); - cancel_work_sync(&sdev->requeue_work); - - if (sdev->host->hostt->slave_destroy) -diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c -index a1a2ac09066fd..cb587e488601c 100644 ---- a/drivers/scsi/sd.c -+++ b/drivers/scsi/sd.c -@@ -3440,8 +3440,8 @@ static int sd_probe(struct device *dev) - if (!sdkp) - goto out; - -- gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE, -- &sd_bio_compl_lkclass); -+ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue, -+ &sd_bio_compl_lkclass); - if (!gd) - goto out_free; - -diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c -index 32d3b8274f148..a278b739d0c5f 100644 ---- a/drivers/scsi/sr.c -+++ b/drivers/scsi/sr.c -@@ -624,8 +624,8 @@ static int sr_probe(struct device *dev) - if (!cd) - goto fail; - -- disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE, -- &sr_bio_compl_lkclass); -+ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue, -+ &sr_bio_compl_lkclass); - if (!disk) - goto fail_free; - mutex_init(&cd->lock); -diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c -index fff0c740c8f33..6f088dd0ba4f3 
100644 ---- a/drivers/thunderbolt/icm.c -+++ b/drivers/thunderbolt/icm.c -@@ -2527,6 +2527,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) - tb->cm_ops = &icm_icl_ops; - break; - -+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: - case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: - icm->is_supported = icm_tgl_is_supported; - icm->get_mode = icm_ar_get_mode; -diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h -index 69083aab2736c..5091677b3f4ba 100644 ---- a/drivers/thunderbolt/nhi.h -+++ b/drivers/thunderbolt/nhi.h -@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops; - * need for the PCI quirk anymore as we will use ICM also on Apple - * hardware. - */ -+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134 - #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137 - #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d - #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e -diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c -index 2945c1b890880..cb83c66bd8a82 100644 ---- a/drivers/tty/serial/fsl_lpuart.c -+++ b/drivers/tty/serial/fsl_lpuart.c -@@ -2706,14 +2706,15 @@ static int lpuart_probe(struct platform_device *pdev) - lpuart_reg.cons = LPUART_CONSOLE; - handler = lpuart_int; - } -- ret = uart_add_one_port(&lpuart_reg, &sport->port); -- if (ret) -- goto failed_attach_port; - - ret = lpuart_global_reset(sport); - if (ret) - goto failed_reset; - -+ ret = uart_add_one_port(&lpuart_reg, &sport->port); -+ if (ret) -+ goto failed_attach_port; -+ - ret = uart_get_rs485_mode(&sport->port); - if (ret) - goto failed_get_rs485; -@@ -2736,9 +2737,9 @@ static int lpuart_probe(struct platform_device *pdev) - - failed_irq_request: - failed_get_rs485: --failed_reset: - uart_remove_one_port(&lpuart_reg, &sport->port); - failed_attach_port: -+failed_reset: - lpuart_disable_clks(sport); - return ret; - } -diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c -index d942ab152f5a4..24aa1dcc5ef7a 100644 ---- a/drivers/tty/serial/serial-tegra.c -+++ b/drivers/tty/serial/serial-tegra.c -@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args) - count = tup->tx_bytes_requested - state.residue; - async_tx_ack(tup->tx_dma_desc); - spin_lock_irqsave(&tup->uport.lock, flags); -- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); -+ uart_xmit_advance(&tup->uport, count); - tup->tx_in_progress = 0; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&tup->uport); -@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u) - static void tegra_uart_stop_tx(struct uart_port *u) - { - struct tegra_uart_port *tup = to_tegra_uport(u); -- struct circ_buf *xmit = &tup->uport.state->xmit; - struct dma_tx_state state; - unsigned int count; - -@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u) - dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state); - count = tup->tx_bytes_requested - state.residue; - async_tx_ack(tup->tx_dma_desc); -- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); -+ uart_xmit_advance(&tup->uport, count); - tup->tx_in_progress = 0; - } - -diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c -index 4877c54c613d1..889b701ba7c62 100644 ---- a/drivers/tty/serial/tegra-tcu.c -+++ b/drivers/tty/serial/tegra-tcu.c -@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port) - break; - - tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count); -- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); -+ 
uart_xmit_advance(port, count); - } - - uart_write_wakeup(port); -diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c -index 829da9cb14a86..55bb0d0422d52 100644 ---- a/drivers/ufs/core/ufshcd.c -+++ b/drivers/ufs/core/ufshcd.c -@@ -9519,7 +9519,7 @@ void ufshcd_remove(struct ufs_hba *hba) - ufs_bsg_remove(hba); - ufshpb_remove(hba); - ufs_sysfs_remove_nodes(hba->dev); -- blk_cleanup_queue(hba->tmf_queue); -+ blk_mq_destroy_queue(hba->tmf_queue); - blk_mq_free_tag_set(&hba->tmf_tag_set); - scsi_remove_host(hba->host); - /* disable interrupts */ -@@ -9815,7 +9815,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) - return 0; - - free_tmf_queue: -- blk_cleanup_queue(hba->tmf_queue); -+ blk_mq_destroy_queue(hba->tmf_queue); - free_tmf_tag_set: - blk_mq_free_tag_set(&hba->tmf_tag_set); - out_remove_scsi_host: -diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index dfef85a18eb55..80b29f937c605 100644 ---- a/drivers/usb/core/hub.c -+++ b/drivers/usb/core/hub.c -@@ -6049,7 +6049,7 @@ re_enumerate: - * - * Return: The same as for usb_reset_and_verify_device(). - * However, if a reset is already in progress (for instance, if a -- * driver doesn't have pre_ or post_reset() callbacks, and while -+ * driver doesn't have pre_reset() or post_reset() callbacks, and while - * being unbound or re-bound during the ongoing reset its disconnect() - * or probe() routine tries to perform a second, nested reset), the - * routine returns -EINPROGRESS. -diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c -index 1db9f51f98aef..08ca65ffe57b7 100644 ---- a/drivers/usb/dwc3/core.c -+++ b/drivers/usb/dwc3/core.c -@@ -1718,12 +1718,6 @@ static int dwc3_probe(struct platform_device *pdev) - - dwc3_get_properties(dwc); - -- if (!dwc->sysdev_is_parent) { -- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); -- if (ret) -- return ret; -- } -- - dwc->reset = devm_reset_control_array_get_optional_shared(dev); - if (IS_ERR(dwc->reset)) - return PTR_ERR(dwc->reset); -@@ -1789,6 +1783,13 @@ static int dwc3_probe(struct platform_device *pdev) - platform_set_drvdata(pdev, dwc); - dwc3_cache_hwparams(dwc); - -+ if (!dwc->sysdev_is_parent && -+ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) { -+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64)); -+ if (ret) -+ goto disable_clks; -+ } -+ - spin_lock_init(&dwc->lock); - mutex_init(&dwc->mutex); - -diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c -index a5e8374a8d710..697683e3fbffa 100644 ---- a/drivers/usb/serial/option.c -+++ b/drivers/usb/serial/option.c -@@ -256,6 +256,7 @@ static void option_instat_callback(struct urb *urb); - #define QUECTEL_PRODUCT_EM060K 0x030b - #define QUECTEL_PRODUCT_EM12 0x0512 - #define QUECTEL_PRODUCT_RM500Q 0x0800 -+#define QUECTEL_PRODUCT_RM520N 0x0801 - #define QUECTEL_PRODUCT_EC200S_CN 0x6002 - #define QUECTEL_PRODUCT_EC200T 0x6026 - #define QUECTEL_PRODUCT_RM500K 0x7001 -@@ -1138,6 +1139,8 @@ static const struct usb_device_id option_ids[] = { - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), - .driver_info = NUMEP2 }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, -+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */ -+ .driver_info = ZLP }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), - .driver_info = RSVD(4) }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 
0xff), -@@ -1159,6 +1162,9 @@ static const struct usb_device_id option_ids[] = { - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), - .driver_info = ZLP }, -+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, -+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, -+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, -diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c -index d5f3f763717ea..d4b2519257962 100644 ---- a/drivers/xen/xenbus/xenbus_client.c -+++ b/drivers/xen/xenbus/xenbus_client.c -@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr, - unsigned long ring_size = nr_pages * XEN_PAGE_SIZE; - grant_ref_t gref_head; - unsigned int i; -+ void *addr; - int ret; - -- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO); -+ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO); - if (!*vaddr) { - ret = -ENOMEM; - goto err; -@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr, - unsigned long gfn; - - if (is_vmalloc_addr(*vaddr)) -- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i])); -+ gfn = pfn_to_gfn(vmalloc_to_pfn(addr)); - else -- gfn = virt_to_gfn(vaddr[i]); -+ gfn = virt_to_gfn(addr); - - grefs[i] = gnttab_claim_grant_reference(&gref_head); - gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, - gfn, 0); -+ -+ addr += XEN_PAGE_SIZE; - } - - return 0; -diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index 781952c5a5c23..20ad619a8a973 100644 ---- a/fs/btrfs/disk-io.c -+++ b/fs/btrfs/disk-io.c -@@ -4586,6 +4586,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) - - set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); - -+ /* -+ * If we had UNFINISHED_DROPS we could still be processing them, so -+ * clear that bit and wake up relocation so it can stop. -+ * We must do this before stopping the block group reclaim task, because -+ * at btrfs_relocate_block_group() we wait for this bit, and after the -+ * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we -+ * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will -+ * return 1. -+ */ -+ btrfs_wake_unfinished_drop(fs_info); -+ - /* - * We may have the reclaim task running and relocating a data block group, - * in which case it may create delayed iputs. So stop it before we park -@@ -4604,12 +4615,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) - */ - kthread_park(fs_info->cleaner_kthread); - -- /* -- * If we had UNFINISHED_DROPS we could still be processing them, so -- * clear that bit and wake up relocation so it can stop. 
-- */ -- btrfs_wake_unfinished_drop(fs_info); -- - /* wait for the qgroup rescan worker to stop */ - btrfs_qgroup_wait_for_completion(fs_info, false); - -@@ -4632,6 +4637,31 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) - /* clear out the rbtree of defraggable inodes */ - btrfs_cleanup_defrag_inodes(fs_info); - -+ /* -+ * After we parked the cleaner kthread, ordered extents may have -+ * completed and created new delayed iputs. If one of the async reclaim -+ * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we -+ * can hang forever trying to stop it, because if a delayed iput is -+ * added after it ran btrfs_run_delayed_iputs() and before it called -+ * btrfs_wait_on_delayed_iputs(), it will hang forever since there is -+ * no one else to run iputs. -+ * -+ * So wait for all ongoing ordered extents to complete and then run -+ * delayed iputs. This works because once we reach this point no one -+ * can either create new ordered extents nor create delayed iputs -+ * through some other means. -+ * -+ * Also note that btrfs_wait_ordered_roots() is not safe here, because -+ * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent, -+ * but the delayed iput for the respective inode is made only when doing -+ * the final btrfs_put_ordered_extent() (which must happen at -+ * btrfs_finish_ordered_io() when we are unmounting). -+ */ -+ btrfs_flush_workqueue(fs_info->endio_write_workers); -+ /* Ordered extents for free space inodes. */ -+ btrfs_flush_workqueue(fs_info->endio_freespace_worker); -+ btrfs_run_delayed_iputs(fs_info); -+ - cancel_work_sync(&fs_info->async_reclaim_work); - cancel_work_sync(&fs_info->async_data_reclaim_work); - cancel_work_sync(&fs_info->preempt_reclaim_work); -diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c -index 1386362fad3b8..4448b7b6ea221 100644 ---- a/fs/btrfs/zoned.c -+++ b/fs/btrfs/zoned.c -@@ -1918,10 +1918,44 @@ out_unlock: - return ret; - } - -+static void wait_eb_writebacks(struct btrfs_block_group *block_group) -+{ -+ struct btrfs_fs_info *fs_info = block_group->fs_info; -+ const u64 end = block_group->start + block_group->length; -+ struct radix_tree_iter iter; -+ struct extent_buffer *eb; -+ void __rcu **slot; -+ -+ rcu_read_lock(); -+ radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, -+ block_group->start >> fs_info->sectorsize_bits) { -+ eb = radix_tree_deref_slot(slot); -+ if (!eb) -+ continue; -+ if (radix_tree_deref_retry(eb)) { -+ slot = radix_tree_iter_retry(&iter); -+ continue; -+ } -+ -+ if (eb->start < block_group->start) -+ continue; -+ if (eb->start >= end) -+ break; -+ -+ slot = radix_tree_iter_resume(slot, &iter); -+ rcu_read_unlock(); -+ wait_on_extent_buffer_writeback(eb); -+ rcu_read_lock(); -+ } -+ rcu_read_unlock(); -+} -+ - static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written) - { - struct btrfs_fs_info *fs_info = block_group->fs_info; - struct map_lookup *map; -+ const bool is_metadata = (block_group->flags & -+ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)); - int ret = 0; - int i; - -@@ -1932,8 +1966,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ - } - - /* Check if we have unwritten allocated space */ -- if ((block_group->flags & -- (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)) && -+ if (is_metadata && - block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) { - spin_unlock(&block_group->lock); - return -EAGAIN; -@@ -1958,6 +1991,9 @@ static int do_zone_finish(struct 
btrfs_block_group *block_group, bool fully_writ - /* No need to wait for NOCOW writers. Zoned mode does not allow that */ - btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start, - block_group->length); -+ /* Wait for extent buffers to be written. */ -+ if (is_metadata) -+ wait_eb_writebacks(block_group); - - spin_lock(&block_group->lock); - -diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c -index 8f2e003e05907..97278c43f8dc0 100644 ---- a/fs/cifs/cifsfs.c -+++ b/fs/cifs/cifsfs.c -@@ -1232,6 +1232,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid, - lock_two_nondirectories(target_inode, src_inode); - - cifs_dbg(FYI, "about to flush pages\n"); -+ -+ rc = filemap_write_and_wait_range(src_inode->i_mapping, off, -+ off + len - 1); -+ if (rc) -+ goto out; -+ - /* should we flush first and last page first */ - truncate_inode_pages(&target_inode->i_data, 0); - -diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c -index e8a8daa82ed76..cc180d37b8ce1 100644 ---- a/fs/cifs/smb2ops.c -+++ b/fs/cifs/smb2ops.c -@@ -1886,17 +1886,8 @@ smb2_copychunk_range(const unsigned int xid, - int chunks_copied = 0; - bool chunk_sizes_updated = false; - ssize_t bytes_written, total_bytes_written = 0; -- struct inode *inode; - - pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); -- -- /* -- * We need to flush all unwritten data before we can send the -- * copychunk ioctl to the server. -- */ -- inode = d_inode(trgtfile->dentry); -- filemap_write_and_wait(inode->i_mapping); -- - if (pcchunk == NULL) - return -ENOMEM; - -@@ -3961,39 +3952,50 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon, - { - int rc; - unsigned int xid; -- struct inode *inode; -+ struct inode *inode = file_inode(file); - struct cifsFileInfo *cfile = file->private_data; -- struct cifsInodeInfo *cifsi; -+ struct cifsInodeInfo *cifsi = CIFS_I(inode); - __le64 eof; -+ loff_t old_eof; - - xid = get_xid(); - -- inode = d_inode(cfile->dentry); -- cifsi = CIFS_I(inode); -+ inode_lock(inode); - -- if (off >= i_size_read(inode) || -- off + len >= i_size_read(inode)) { -+ old_eof = i_size_read(inode); -+ if ((off >= old_eof) || -+ off + len >= old_eof) { - rc = -EINVAL; - goto out; - } - -+ filemap_invalidate_lock(inode->i_mapping); -+ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1); -+ if (rc < 0) -+ goto out_2; -+ -+ truncate_pagecache_range(inode, off, old_eof); -+ - rc = smb2_copychunk_range(xid, cfile, cfile, off + len, -- i_size_read(inode) - off - len, off); -+ old_eof - off - len, off); - if (rc < 0) -- goto out; -+ goto out_2; - -- eof = cpu_to_le64(i_size_read(inode) - len); -+ eof = cpu_to_le64(old_eof - len); - rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, cfile->pid, &eof); - if (rc < 0) -- goto out; -+ goto out_2; - - rc = 0; - - cifsi->server_eof = i_size_read(inode) - len; - truncate_setsize(inode, cifsi->server_eof); - fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof); -+out_2: -+ filemap_invalidate_unlock(inode->i_mapping); - out: -+ inode_unlock(inode); - free_xid(xid); - return rc; - } -@@ -4004,34 +4006,47 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon, - int rc; - unsigned int xid; - struct cifsFileInfo *cfile = file->private_data; -+ struct inode *inode = file_inode(file); - __le64 eof; -- __u64 count; -+ __u64 count, old_eof; - - xid = get_xid(); - -- if (off >= i_size_read(file->f_inode)) { -+ inode_lock(inode); -+ -+ old_eof = i_size_read(inode); -+ if (off >= old_eof) { - rc = 
-EINVAL; - goto out; - } - -- count = i_size_read(file->f_inode) - off; -- eof = cpu_to_le64(i_size_read(file->f_inode) + len); -+ count = old_eof - off; -+ eof = cpu_to_le64(old_eof + len); -+ -+ filemap_invalidate_lock(inode->i_mapping); -+ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1); -+ if (rc < 0) -+ goto out_2; -+ truncate_pagecache_range(inode, off, old_eof); - - rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, cfile->pid, &eof); - if (rc < 0) -- goto out; -+ goto out_2; - - rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len); - if (rc < 0) -- goto out; -+ goto out_2; - -- rc = smb3_zero_range(file, tcon, off, len, 1); -+ rc = smb3_zero_data(file, tcon, off, len, xid); - if (rc < 0) -- goto out; -+ goto out_2; - - rc = 0; -+out_2: -+ filemap_invalidate_unlock(inode->i_mapping); - out: -+ inode_unlock(inode); - free_xid(xid); - return rc; - } -diff --git a/fs/dax.c b/fs/dax.c -index 4155a6107fa10..7ab248ed21aa3 100644 ---- a/fs/dax.c -+++ b/fs/dax.c -@@ -1241,6 +1241,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, - loff_t done = 0; - int ret; - -+ if (!iomi.len) -+ return 0; -+ - if (iov_iter_rw(iter) == WRITE) { - lockdep_assert_held_write(&iomi.inode->i_rwsem); - iomi.flags |= IOMAP_WRITE; -diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c -index 9de6a6b844c9e..e541a004f8efa 100644 ---- a/fs/exfat/fatent.c -+++ b/fs/exfat/fatent.c -@@ -270,8 +270,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu) - struct super_block *sb = dir->i_sb; - struct exfat_sb_info *sbi = EXFAT_SB(sb); - struct buffer_head *bh; -- sector_t blknr, last_blknr; -- int i; -+ sector_t blknr, last_blknr, i; - - blknr = exfat_cluster_to_sector(sbi, clu); - last_blknr = blknr + sbi->sect_per_clus; -diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h -index adfc30ee4b7be..0d86931269bfc 100644 ---- a/fs/ext4/ext4.h -+++ b/fs/ext4/ext4.h -@@ -167,8 +167,6 @@ enum SHIFT_DIRECTION { - #define EXT4_MB_CR0_OPTIMIZED 0x8000 - /* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */ - #define EXT4_MB_CR1_OPTIMIZED 0x00010000 --/* Perform linear traversal for one group */ --#define EXT4_MB_SEARCH_NEXT_LINEAR 0x00020000 - struct ext4_allocation_request { - /* target inode for block we're allocating */ - struct inode *inode; -@@ -1589,8 +1587,8 @@ struct ext4_sb_info { - struct list_head s_discard_list; - struct work_struct s_discard_work; - atomic_t s_retry_alloc_pending; -- struct rb_root s_mb_avg_fragment_size_root; -- rwlock_t s_mb_rb_lock; -+ struct list_head *s_mb_avg_fragment_size; -+ rwlock_t *s_mb_avg_fragment_size_locks; - struct list_head *s_mb_largest_free_orders; - rwlock_t *s_mb_largest_free_orders_locks; - -@@ -3402,6 +3400,8 @@ struct ext4_group_info { - ext4_grpblk_t bb_first_free; /* first free block */ - ext4_grpblk_t bb_free; /* total free blocks */ - ext4_grpblk_t bb_fragments; /* nr of freespace fragments */ -+ int bb_avg_fragment_size_order; /* order of average -+ fragment in BG */ - ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */ - ext4_group_t bb_group; /* Group number */ - struct list_head bb_prealloc_list; -@@ -3409,7 +3409,7 @@ struct ext4_group_info { - void *bb_bitmap; - #endif - struct rw_semaphore alloc_sem; -- struct rb_node bb_avg_fragment_size_rb; -+ struct list_head bb_avg_fragment_size_node; - struct list_head bb_largest_free_order_node; - ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block - * regions, index is order. 
-diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c -index c148bb97b5273..5235974126bd3 100644 ---- a/fs/ext4/extents.c -+++ b/fs/ext4/extents.c -@@ -460,6 +460,10 @@ static int __ext4_ext_check(const char *function, unsigned int line, - error_msg = "invalid eh_entries"; - goto corrupted; - } -+ if (unlikely((eh->eh_entries == 0) && (depth > 0))) { -+ error_msg = "eh_entries is 0 but eh_depth is > 0"; -+ goto corrupted; -+ } - if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) { - error_msg = "invalid extent entries"; - goto corrupted; -diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c -index f73e5eb43eae1..208b87ce88588 100644 ---- a/fs/ext4/ialloc.c -+++ b/fs/ext4/ialloc.c -@@ -510,7 +510,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, - goto fallback; - } - -- max_dirs = ndirs / ngroups + inodes_per_group / 16; -+ max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16; - min_inodes = avefreei - inodes_per_group*flex_size / 4; - if (min_inodes < 1) - min_inodes = 1; -diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c -index 38e7dc2531b17..fd29e15d1c3b5 100644 ---- a/fs/ext4/mballoc.c -+++ b/fs/ext4/mballoc.c -@@ -140,13 +140,15 @@ - * number of buddy bitmap orders possible) number of lists. Group-infos are - * placed in appropriate lists. - * -- * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root) -+ * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size) - * -- * Locking: sbi->s_mb_rb_lock (rwlock) -+ * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks) - * -- * This is a red black tree consisting of group infos and the tree is sorted -- * by average fragment sizes (which is calculated as ext4_group_info->bb_free -- * / ext4_group_info->bb_fragments). -+ * This is an array of lists where in the i-th list there are groups with -+ * average fragment size >= 2^i and < 2^(i+1). The average fragment size -+ * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments. -+ * Note that we don't bother with a special list for completely empty groups -+ * so we only have MB_NUM_ORDERS(sb) lists. - * - * When "mb_optimize_scan" mount option is set, mballoc consults the above data - * structures to decide the order in which groups are to be traversed for -@@ -160,7 +162,8 @@ - * - * At CR = 1, we only consider groups where average fragment size > request - * size. So, we lookup a group which has average fragment size just above or -- * equal to request size using our rb tree (data structure 2) in O(log N) time. -+ * equal to request size using our average fragment size group lists (data -+ * structure 2) in O(1) time. - * - * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in - * linear order which requires O(N) search time for each CR 0 and CR 1 phase. 
-@@ -802,65 +805,51 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, - } - } - --static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new, -- int (*cmp)(struct rb_node *, struct rb_node *)) -+static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len) - { -- struct rb_node **iter = &root->rb_node, *parent = NULL; -+ int order; - -- while (*iter) { -- parent = *iter; -- if (cmp(new, *iter) > 0) -- iter = &((*iter)->rb_left); -- else -- iter = &((*iter)->rb_right); -- } -- -- rb_link_node(new, parent, iter); -- rb_insert_color(new, root); --} -- --static int --ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2) --{ -- struct ext4_group_info *grp1 = rb_entry(rb1, -- struct ext4_group_info, -- bb_avg_fragment_size_rb); -- struct ext4_group_info *grp2 = rb_entry(rb2, -- struct ext4_group_info, -- bb_avg_fragment_size_rb); -- int num_frags_1, num_frags_2; -- -- num_frags_1 = grp1->bb_fragments ? -- grp1->bb_free / grp1->bb_fragments : 0; -- num_frags_2 = grp2->bb_fragments ? -- grp2->bb_free / grp2->bb_fragments : 0; -- -- return (num_frags_2 - num_frags_1); -+ /* -+ * We don't bother with a special lists groups with only 1 block free -+ * extents and for completely empty groups. -+ */ -+ order = fls(len) - 2; -+ if (order < 0) -+ return 0; -+ if (order == MB_NUM_ORDERS(sb)) -+ order--; -+ return order; - } - --/* -- * Reinsert grpinfo into the avg_fragment_size tree with new average -- * fragment size. -- */ -+/* Move group to appropriate avg_fragment_size list */ - static void - mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) - { - struct ext4_sb_info *sbi = EXT4_SB(sb); -+ int new_order; - - if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0) - return; - -- write_lock(&sbi->s_mb_rb_lock); -- if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) { -- rb_erase(&grp->bb_avg_fragment_size_rb, -- &sbi->s_mb_avg_fragment_size_root); -- RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb); -- } -+ new_order = mb_avg_fragment_size_order(sb, -+ grp->bb_free / grp->bb_fragments); -+ if (new_order == grp->bb_avg_fragment_size_order) -+ return; - -- ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root, -- &grp->bb_avg_fragment_size_rb, -- ext4_mb_avg_fragment_size_cmp); -- write_unlock(&sbi->s_mb_rb_lock); -+ if (grp->bb_avg_fragment_size_order != -1) { -+ write_lock(&sbi->s_mb_avg_fragment_size_locks[ -+ grp->bb_avg_fragment_size_order]); -+ list_del(&grp->bb_avg_fragment_size_node); -+ write_unlock(&sbi->s_mb_avg_fragment_size_locks[ -+ grp->bb_avg_fragment_size_order]); -+ } -+ grp->bb_avg_fragment_size_order = new_order; -+ write_lock(&sbi->s_mb_avg_fragment_size_locks[ -+ grp->bb_avg_fragment_size_order]); -+ list_add_tail(&grp->bb_avg_fragment_size_node, -+ &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); -+ write_unlock(&sbi->s_mb_avg_fragment_size_locks[ -+ grp->bb_avg_fragment_size_order]); - } - - /* -@@ -909,86 +898,55 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac, - *new_cr = 1; - } else { - *group = grp->bb_group; -- ac->ac_last_optimal_group = *group; - ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED; - } - } - - /* -- * Choose next group by traversing average fragment size tree. Updates *new_cr -- * if cr lvel needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that -- * the linear search should continue for one iteration since there's lock -- * contention on the rb tree lock. 
-+ * Choose next group by traversing average fragment size list of suitable -+ * order. Updates *new_cr if cr level needs an update. - */ - static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac, - int *new_cr, ext4_group_t *group, ext4_group_t ngroups) - { - struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); -- int avg_fragment_size, best_so_far; -- struct rb_node *node, *found; -- struct ext4_group_info *grp; -- -- /* -- * If there is contention on the lock, instead of waiting for the lock -- * to become available, just continue searching lineraly. We'll resume -- * our rb tree search later starting at ac->ac_last_optimal_group. -- */ -- if (!read_trylock(&sbi->s_mb_rb_lock)) { -- ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR; -- return; -- } -+ struct ext4_group_info *grp = NULL, *iter; -+ int i; - - if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) { - if (sbi->s_mb_stats) - atomic_inc(&sbi->s_bal_cr1_bad_suggestions); -- /* We have found something at CR 1 in the past */ -- grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group); -- for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL; -- found = rb_next(found)) { -- grp = rb_entry(found, struct ext4_group_info, -- bb_avg_fragment_size_rb); -+ } -+ -+ for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); -+ i < MB_NUM_ORDERS(ac->ac_sb); i++) { -+ if (list_empty(&sbi->s_mb_avg_fragment_size[i])) -+ continue; -+ read_lock(&sbi->s_mb_avg_fragment_size_locks[i]); -+ if (list_empty(&sbi->s_mb_avg_fragment_size[i])) { -+ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]); -+ continue; -+ } -+ list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i], -+ bb_avg_fragment_size_node) { - if (sbi->s_mb_stats) - atomic64_inc(&sbi->s_bal_cX_groups_considered[1]); -- if (likely(ext4_mb_good_group(ac, grp->bb_group, 1))) -+ if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) { -+ grp = iter; - break; -- } -- goto done; -- } -- -- node = sbi->s_mb_avg_fragment_size_root.rb_node; -- best_so_far = 0; -- found = NULL; -- -- while (node) { -- grp = rb_entry(node, struct ext4_group_info, -- bb_avg_fragment_size_rb); -- avg_fragment_size = 0; -- if (ext4_mb_good_group(ac, grp->bb_group, 1)) { -- avg_fragment_size = grp->bb_fragments ? 
-- grp->bb_free / grp->bb_fragments : 0; -- if (!best_so_far || avg_fragment_size < best_so_far) { -- best_so_far = avg_fragment_size; -- found = node; - } - } -- if (avg_fragment_size > ac->ac_g_ex.fe_len) -- node = node->rb_right; -- else -- node = node->rb_left; -+ read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]); -+ if (grp) -+ break; - } - --done: -- if (found) { -- grp = rb_entry(found, struct ext4_group_info, -- bb_avg_fragment_size_rb); -+ if (grp) { - *group = grp->bb_group; - ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED; - } else { - *new_cr = 2; - } -- -- read_unlock(&sbi->s_mb_rb_lock); -- ac->ac_last_optimal_group = *group; - } - - static inline int should_optimize_scan(struct ext4_allocation_context *ac) -@@ -1017,11 +975,6 @@ next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups) - goto inc_and_return; - } - -- if (ac->ac_flags & EXT4_MB_SEARCH_NEXT_LINEAR) { -- ac->ac_flags &= ~EXT4_MB_SEARCH_NEXT_LINEAR; -- goto inc_and_return; -- } -- - return group; - inc_and_return: - /* -@@ -1049,8 +1002,10 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, - { - *new_cr = ac->ac_criteria; - -- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) -+ if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) { -+ *group = next_linear_group(ac, *group, ngroups); - return; -+ } - - if (*new_cr == 0) { - ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups); -@@ -1075,23 +1030,25 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) - struct ext4_sb_info *sbi = EXT4_SB(sb); - int i; - -- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) { -+ for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) -+ if (grp->bb_counters[i] > 0) -+ break; -+ /* No need to move between order lists? 
*/ -+ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || -+ i == grp->bb_largest_free_order) { -+ grp->bb_largest_free_order = i; -+ return; -+ } -+ -+ if (grp->bb_largest_free_order >= 0) { - write_lock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); - list_del_init(&grp->bb_largest_free_order_node); - write_unlock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); - } -- grp->bb_largest_free_order = -1; /* uninit */ -- -- for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) { -- if (grp->bb_counters[i] > 0) { -- grp->bb_largest_free_order = i; -- break; -- } -- } -- if (test_opt2(sb, MB_OPTIMIZE_SCAN) && -- grp->bb_largest_free_order >= 0 && grp->bb_free) { -+ grp->bb_largest_free_order = i; -+ if (grp->bb_largest_free_order >= 0 && grp->bb_free) { - write_lock(&sbi->s_mb_largest_free_orders_locks[ - grp->bb_largest_free_order]); - list_add_tail(&grp->bb_largest_free_order_node, -@@ -1148,13 +1105,13 @@ void ext4_mb_generate_buddy(struct super_block *sb, - EXT4_GROUP_INFO_BBITMAP_CORRUPT); - } - mb_set_largest_free_order(sb, grp); -+ mb_update_avg_fragment_size(sb, grp); - - clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); - - period = get_cycles() - period; - atomic_inc(&sbi->s_mb_buddies_generated); - atomic64_add(period, &sbi->s_mb_generation_time); -- mb_update_avg_fragment_size(sb, grp); - } - - /* The buddy information is attached the buddy cache inode -@@ -2630,7 +2587,7 @@ static noinline_for_stack int - ext4_mb_regular_allocator(struct ext4_allocation_context *ac) - { - ext4_group_t prefetch_grp = 0, ngroups, group, i; -- int cr = -1; -+ int cr = -1, new_cr; - int err = 0, first_err = 0; - unsigned int nr = 0, prefetch_ios = 0; - struct ext4_sb_info *sbi; -@@ -2701,17 +2658,14 @@ repeat: - * from the goal value specified - */ - group = ac->ac_g_ex.fe_group; -- ac->ac_last_optimal_group = group; - ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; - prefetch_grp = group; - -- for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups), -- i++) { -- int ret = 0, new_cr; -+ for (i = 0, new_cr = cr; i < ngroups; i++, -+ ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { -+ int ret = 0; - - cond_resched(); -- -- ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups); - if (new_cr != cr) { - cr = new_cr; - goto repeat; -@@ -2985,9 +2939,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock) - struct super_block *sb = pde_data(file_inode(seq->file)); - unsigned long position; - -- read_lock(&EXT4_SB(sb)->s_mb_rb_lock); -- -- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1) -+ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) - return NULL; - position = *pos + 1; - return (void *) ((unsigned long) position); -@@ -2999,7 +2951,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof - unsigned long position; - - ++*pos; -- if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1) -+ if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) - return NULL; - position = *pos + 1; - return (void *) ((unsigned long) position); -@@ -3011,29 +2963,22 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) - struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long position = ((unsigned long) v); - struct ext4_group_info *grp; -- struct rb_node *n; -- unsigned int count, min, max; -+ unsigned int count; - - position--; - if (position >= MB_NUM_ORDERS(sb)) { -- seq_puts(seq, "fragment_size_tree:\n"); -- n = rb_first(&sbi->s_mb_avg_fragment_size_root); -- if (!n) { -- seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 
0\n"); -- return 0; -- } -- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb); -- min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0; -- count = 1; -- while (rb_next(n)) { -- count++; -- n = rb_next(n); -- } -- grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb); -- max = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0; -+ position -= MB_NUM_ORDERS(sb); -+ if (position == 0) -+ seq_puts(seq, "avg_fragment_size_lists:\n"); - -- seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n", -- min, max, count); -+ count = 0; -+ read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); -+ list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], -+ bb_avg_fragment_size_node) -+ count++; -+ read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); -+ seq_printf(seq, "\tlist_order_%u_groups: %u\n", -+ (unsigned int)position, count); - return 0; - } - -@@ -3043,9 +2988,11 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) - seq_puts(seq, "max_free_order_lists:\n"); - } - count = 0; -+ read_lock(&sbi->s_mb_largest_free_orders_locks[position]); - list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], - bb_largest_free_order_node) - count++; -+ read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); - seq_printf(seq, "\tlist_order_%u_groups: %u\n", - (unsigned int)position, count); - -@@ -3053,11 +3000,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) - } - - static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) --__releases(&EXT4_SB(sb)->s_mb_rb_lock) - { -- struct super_block *sb = pde_data(file_inode(seq->file)); -- -- read_unlock(&EXT4_SB(sb)->s_mb_rb_lock); - } - - const struct seq_operations ext4_mb_seq_structs_summary_ops = { -@@ -3170,8 +3113,9 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, - init_rwsem(&meta_group_info[i]->alloc_sem); - meta_group_info[i]->bb_free_root = RB_ROOT; - INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); -- RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb); -+ INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); - meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ -+ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ - meta_group_info[i]->bb_group = group; - - mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); -@@ -3420,7 +3364,24 @@ int ext4_mb_init(struct super_block *sb) - i++; - } while (i < MB_NUM_ORDERS(sb)); - -- sbi->s_mb_avg_fragment_size_root = RB_ROOT; -+ sbi->s_mb_avg_fragment_size = -+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), -+ GFP_KERNEL); -+ if (!sbi->s_mb_avg_fragment_size) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ sbi->s_mb_avg_fragment_size_locks = -+ kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), -+ GFP_KERNEL); -+ if (!sbi->s_mb_avg_fragment_size_locks) { -+ ret = -ENOMEM; -+ goto out; -+ } -+ for (i = 0; i < MB_NUM_ORDERS(sb); i++) { -+ INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); -+ rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); -+ } - sbi->s_mb_largest_free_orders = - kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), - GFP_KERNEL); -@@ -3439,7 +3400,6 @@ int ext4_mb_init(struct super_block *sb) - INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); - rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); - } -- rwlock_init(&sbi->s_mb_rb_lock); - - spin_lock_init(&sbi->s_md_lock); - sbi->s_mb_free_pending = 0; -@@ -3510,6 +3470,8 @@ 
out_free_locality_groups: - free_percpu(sbi->s_locality_groups); - sbi->s_locality_groups = NULL; - out: -+ kfree(sbi->s_mb_avg_fragment_size); -+ kfree(sbi->s_mb_avg_fragment_size_locks); - kfree(sbi->s_mb_largest_free_orders); - kfree(sbi->s_mb_largest_free_orders_locks); - kfree(sbi->s_mb_offsets); -@@ -3576,6 +3538,8 @@ int ext4_mb_release(struct super_block *sb) - kvfree(group_info); - rcu_read_unlock(); - } -+ kfree(sbi->s_mb_avg_fragment_size); -+ kfree(sbi->s_mb_avg_fragment_size_locks); - kfree(sbi->s_mb_largest_free_orders); - kfree(sbi->s_mb_largest_free_orders_locks); - kfree(sbi->s_mb_offsets); -@@ -5187,6 +5151,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) - struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); - int bsbits = ac->ac_sb->s_blocksize_bits; - loff_t size, isize; -+ bool inode_pa_eligible, group_pa_eligible; - - if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) - return; -@@ -5194,25 +5159,27 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) - if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) - return; - -+ group_pa_eligible = sbi->s_mb_group_prealloc > 0; -+ inode_pa_eligible = true; - size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); - isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) - >> bsbits; - -+ /* No point in using inode preallocation for closed files */ - if ((size == isize) && !ext4_fs_is_busy(sbi) && -- !inode_is_open_for_write(ac->ac_inode)) { -- ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; -- return; -- } -+ !inode_is_open_for_write(ac->ac_inode)) -+ inode_pa_eligible = false; - -- if (sbi->s_mb_group_prealloc <= 0) { -- ac->ac_flags |= EXT4_MB_STREAM_ALLOC; -- return; -- } -- -- /* don't use group allocation for large files */ - size = max(size, isize); -- if (size > sbi->s_mb_stream_request) { -- ac->ac_flags |= EXT4_MB_STREAM_ALLOC; -+ /* Don't use group allocation for large files */ -+ if (size > sbi->s_mb_stream_request) -+ group_pa_eligible = false; -+ -+ if (!group_pa_eligible) { -+ if (inode_pa_eligible) -+ ac->ac_flags |= EXT4_MB_STREAM_ALLOC; -+ else -+ ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; - return; - } - -@@ -5559,6 +5526,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, - ext4_fsblk_t block = 0; - unsigned int inquota = 0; - unsigned int reserv_clstrs = 0; -+ int retries = 0; - u64 seq; - - might_sleep(); -@@ -5661,7 +5629,8 @@ repeat: - ar->len = ac->ac_b_ex.fe_len; - } - } else { -- if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) -+ if (++retries < 3 && -+ ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) - goto repeat; - /* - * If block allocation fails then the pa allocated above -diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h -index 39da92ceabf88..dcda2a943cee0 100644 ---- a/fs/ext4/mballoc.h -+++ b/fs/ext4/mballoc.h -@@ -178,7 +178,6 @@ struct ext4_allocation_context { - /* copy of the best found extent taken before preallocation efforts */ - struct ext4_free_extent ac_f_ex; - -- ext4_group_t ac_last_optimal_group; - __u32 ac_groups_considered; - __u32 ac_flags; /* allocation hints */ - __u16 ac_groups_scanned; -diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h -index 7515a465ec03a..7c90b1ab3e00d 100644 ---- a/include/asm-generic/vmlinux.lds.h -+++ b/include/asm-generic/vmlinux.lds.h -@@ -543,10 +543,9 @@ - */ - #ifdef CONFIG_CFI_CLANG - #define TEXT_CFI_JT \ -- . = ALIGN(PMD_SIZE); \ -+ ALIGN_FUNCTION(); \ - __cfi_jt_start = .; \ - *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \ -- . 
= ALIGN(PMD_SIZE); \ - __cfi_jt_end = .; - #else - #define TEXT_CFI_JT -diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h -index e2d9daf7e8dd0..0fd96e92c6c65 100644 ---- a/include/linux/blk-mq.h -+++ b/include/linux/blk-mq.h -@@ -686,10 +686,13 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, - \ - __blk_mq_alloc_disk(set, queuedata, &__key); \ - }) -+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, -+ struct lock_class_key *lkclass); - struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); - int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, - struct request_queue *q); - void blk_mq_unregister_dev(struct device *, struct request_queue *); -+void blk_mq_destroy_queue(struct request_queue *); - - int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); - int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, -diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 62e3ff52ab033..83eb8869a8c94 100644 ---- a/include/linux/blkdev.h -+++ b/include/linux/blkdev.h -@@ -148,6 +148,7 @@ struct gendisk { - #define GD_NATIVE_CAPACITY 3 - #define GD_ADDED 4 - #define GD_SUPPRESS_PART_SCAN 5 -+#define GD_OWNS_QUEUE 6 - - struct mutex open_mutex; /* open/close mutex */ - unsigned open_partitions; /* number of open partitions */ -@@ -559,7 +560,6 @@ struct request_queue { - #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ - #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ - #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ --#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ - #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ - #define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */ - #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ -@@ -587,7 +587,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); - #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) - #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) - #define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags) --#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) - #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) - #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) - #define blk_queue_noxmerges(q) \ -@@ -812,8 +811,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb) - - int bdev_disk_changed(struct gendisk *disk, bool invalidate); - --struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, -- struct lock_class_key *lkclass); - void put_disk(struct gendisk *disk); - struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); - -@@ -955,7 +952,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, - /* - * Access functions for manipulating queue properties - */ --extern void blk_cleanup_queue(struct request_queue *); - void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit); - extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); - extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); -diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h -index 4592d08459417..57aa459c6618a 100644 ---- a/include/linux/cpumask.h -+++ b/include/linux/cpumask.h -@@ -1083,9 +1083,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, - * cover a worst-case of every 
other cpu being on one of two nodes for a - * very large NR_CPUS. - * -- * Use PAGE_SIZE as a minimum for smaller configurations. -+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding -+ * unsigned comparison to -1. - */ --#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \ -+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \ - ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE) - #define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE) - -diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h -index fde258b3decd5..037a8d81a66cf 100644 ---- a/include/linux/serial_core.h -+++ b/include/linux/serial_core.h -@@ -302,6 +302,23 @@ struct uart_state { - /* number of characters left in xmit buffer before we ask for more */ - #define WAKEUP_CHARS 256 - -+/** -+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars -+ * @up: uart_port structure describing the port -+ * @chars: number of characters sent -+ * -+ * This function advances the tail of circular xmit buffer by the number of -+ * @chars transmitted and handles accounting of transmitted bytes (into -+ * @up's icount.tx). -+ */ -+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) -+{ -+ struct circ_buf *xmit = &up->state->xmit; -+ -+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); -+ up->icount.tx += chars; -+} -+ - struct module; - struct tty_driver; - -diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h -index 184105d682942..f2273bd5a4c58 100644 ---- a/include/net/bond_3ad.h -+++ b/include/net/bond_3ad.h -@@ -15,8 +15,6 @@ - #define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) - #define AD_TIMER_INTERVAL 100 /*msec*/ - --#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} -- - #define AD_LACP_SLOW 0 - #define AD_LACP_FAST 1 - -diff --git a/include/net/bonding.h b/include/net/bonding.h -index 3b816ae8b1f3b..7ac1773b99224 100644 ---- a/include/net/bonding.h -+++ b/include/net/bonding.h -@@ -785,6 +785,9 @@ extern struct rtnl_link_ops bond_link_ops; - /* exported from bond_sysfs_slave.c */ - extern const struct sysfs_ops slave_sysfs_ops; - -+/* exported from bond_3ad.c */ -+extern const u8 lacpdu_mcast_addr[]; -+ - static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) - { - dev_core_stats_tx_dropped_inc(dev); -diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h -index 667d889b92b52..3e1cea155049b 100644 ---- a/include/scsi/scsi_host.h -+++ b/include/scsi/scsi_host.h -@@ -557,6 +557,8 @@ struct Scsi_Host { - struct scsi_host_template *hostt; - struct scsi_transport_template *transportt; - -+ struct kref tagset_refcnt; -+ struct completion tagset_freed; - /* Area to keep a shared tag map */ - struct blk_mq_tag_set tag_set; - -diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h -index 65e13a099b1a0..a9f5d884560ac 100644 ---- a/include/uapi/linux/xfrm.h -+++ b/include/uapi/linux/xfrm.h -@@ -296,7 +296,7 @@ enum xfrm_attr_type_t { - XFRMA_ETIMER_THRESH, - XFRMA_SRCADDR, /* xfrm_address_t */ - XFRMA_COADDR, /* xfrm_address_t */ -- XFRMA_LASTUSED, /* unsigned long */ -+ XFRMA_LASTUSED, /* __u64 */ - XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */ - XFRMA_MIGRATE, - XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */ -diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c -index 602da2cfd57c8..15a6f1e93e5af 100644 ---- a/io_uring/io_uring.c -+++ b/io_uring/io_uring.c -@@ -10951,6 +10951,9 @@ static __cold void io_ring_ctx_wait_and_kill(struct 
io_ring_ctx *ctx) - io_poll_remove_all(ctx, NULL, true); - /* if we failed setting up the ctx, we might not have any rings */ - io_iopoll_try_reap_events(ctx); -+ /* drop cached put refs after potentially doing completions */ -+ if (current->io_uring) -+ io_uring_drop_tctx_refs(current); - } - - INIT_WORK(&ctx->exit_work, io_ring_exit_work); -diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c -index e702ca368539a..80c23f48f3b4b 100644 ---- a/kernel/cgroup/cgroup.c -+++ b/kernel/cgroup/cgroup.c -@@ -6026,6 +6026,9 @@ struct cgroup *cgroup_get_from_id(u64 id) - if (!kn) - goto out; - -+ if (kernfs_type(kn) != KERNFS_DIR) -+ goto put; -+ - rcu_read_lock(); - - cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv); -@@ -6033,7 +6036,7 @@ struct cgroup *cgroup_get_from_id(u64 id) - cgrp = NULL; - - rcu_read_unlock(); -- -+put: - kernfs_put(kn); - out: - return cgrp; -diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index aa8a82bc67384..fc6e4f2523452 100644 ---- a/kernel/workqueue.c -+++ b/kernel/workqueue.c -@@ -3066,10 +3066,8 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) - if (WARN_ON(!work->func)) - return false; - -- if (!from_cancel) { -- lock_map_acquire(&work->lockdep_map); -- lock_map_release(&work->lockdep_map); -- } -+ lock_map_acquire(&work->lockdep_map); -+ lock_map_release(&work->lockdep_map); - - if (start_flush_work(work, &barr, from_cancel)) { - wait_for_completion(&barr.done); -diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug -index 2e24db4bff192..c399ab486557f 100644 ---- a/lib/Kconfig.debug -+++ b/lib/Kconfig.debug -@@ -264,8 +264,10 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT - config DEBUG_INFO_DWARF4 - bool "Generate DWARF Version 4 debuginfo" - select DEBUG_INFO -+ depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502))) - help -- Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+. -+ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2 -+ if using clang without clang's integrated assembler, and gdb 7.0+. - - If you have consumers of DWARF debug info that are not ready for - newer revisions of DWARF, you may wish to choose this or have your -diff --git a/mm/slab_common.c b/mm/slab_common.c -index dbd4b6f9b0e79..29ae1358d5f07 100644 ---- a/mm/slab_common.c -+++ b/mm/slab_common.c -@@ -503,6 +503,7 @@ void slab_kmem_cache_release(struct kmem_cache *s) - void kmem_cache_destroy(struct kmem_cache *s) - { - int refcnt; -+ bool rcu_set; - - if (unlikely(!s) || !kasan_check_byte(s)) - return; -@@ -510,6 +511,8 @@ void kmem_cache_destroy(struct kmem_cache *s) - cpus_read_lock(); - mutex_lock(&slab_mutex); - -+ rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU; -+ - refcnt = --s->refcount; - if (refcnt) - goto out_unlock; -@@ -520,7 +523,7 @@ void kmem_cache_destroy(struct kmem_cache *s) - out_unlock: - mutex_unlock(&slab_mutex); - cpus_read_unlock(); -- if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU)) -+ if (!refcnt && !rcu_set) - kmem_cache_release(s); - } - EXPORT_SYMBOL(kmem_cache_destroy); -diff --git a/mm/slub.c b/mm/slub.c -index b1281b8654bd3..1eec942b8336c 100644 ---- a/mm/slub.c -+++ b/mm/slub.c -@@ -310,6 +310,11 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si) - */ - static nodemask_t slab_nodes; - -+/* -+ * Workqueue used for flush_cpu_slab(). 
-+ */ -+static struct workqueue_struct *flushwq; -+ - /******************************************************************** - * Core slab cache functions - *******************************************************************/ -@@ -2730,7 +2735,7 @@ static void flush_all_cpus_locked(struct kmem_cache *s) - INIT_WORK(&sfw->work, flush_cpu_slab); - sfw->skip = false; - sfw->s = s; -- schedule_work_on(cpu, &sfw->work); -+ queue_work_on(cpu, flushwq, &sfw->work); - } - - for_each_online_cpu(cpu) { -@@ -4880,6 +4885,8 @@ void __init kmem_cache_init(void) - - void __init kmem_cache_init_late(void) - { -+ flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0); -+ WARN_ON(!flushwq); - } - - struct kmem_cache * -@@ -4950,6 +4957,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) - /* Honor the call site pointer we received. */ - trace_kmalloc(caller, ret, size, s->size, gfpflags); - -+ ret = kasan_kmalloc(s, ret, size, gfpflags); -+ - return ret; - } - EXPORT_SYMBOL(__kmalloc_track_caller); -@@ -4981,6 +4990,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, - /* Honor the call site pointer we received. */ - trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); - -+ ret = kasan_kmalloc(s, ret, size, gfpflags); -+ - return ret; - } - EXPORT_SYMBOL(__kmalloc_node_track_caller); -@@ -5914,7 +5925,8 @@ static char *create_unique_id(struct kmem_cache *s) - char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL); - char *p = name; - -- BUG_ON(!name); -+ if (!name) -+ return ERR_PTR(-ENOMEM); - - *p++ = ':'; - /* -@@ -5972,6 +5984,8 @@ static int sysfs_slab_add(struct kmem_cache *s) - * for the symlinks. - */ - name = create_unique_id(s); -+ if (IS_ERR(name)) -+ return PTR_ERR(name); - } - - s->kobj.kset = kset; -diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c -index b8f8da7ee3dea..41c1ad33d009f 100644 ---- a/net/batman-adv/hard-interface.c -+++ b/net/batman-adv/hard-interface.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -700,6 +701,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, - int max_header_len = batadv_max_header_len(); - int ret; - -+ if (hard_iface->net_dev->mtu < ETH_MIN_MTU + max_header_len) -+ return -EINVAL; -+ - if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) - goto out; - -diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c -index 9a0ae59cdc500..4f385d52a1c49 100644 ---- a/net/bridge/netfilter/ebtables.c -+++ b/net/bridge/netfilter/ebtables.c -@@ -1040,8 +1040,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, - goto free_iterate; - } - -- if (repl->valid_hooks != t->valid_hooks) -+ if (repl->valid_hooks != t->valid_hooks) { -+ ret = -EINVAL; - goto free_unlock; -+ } - - if (repl->num_counters && repl->num_counters != t->private->nentries) { - ret = -EINVAL; -diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c -index 6aee04f75e3e4..bcba61ef5b378 100644 ---- a/net/core/flow_dissector.c -+++ b/net/core/flow_dissector.c -@@ -1572,9 +1572,8 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys) - - switch (keys->control.addr_type) { - case FLOW_DISSECTOR_KEY_IPV4_ADDRS: -- addr_diff = (__force u32)keys->addrs.v4addrs.dst - -- (__force u32)keys->addrs.v4addrs.src; -- if (addr_diff < 0) -+ if ((__force u32)keys->addrs.v4addrs.dst < -+ (__force u32)keys->addrs.v4addrs.src) - swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); - - if 
((__force u16)keys->ports.dst < -diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c -index 9f6f4a41245d4..1012012a061fe 100644 ---- a/net/ipv6/af_inet6.c -+++ b/net/ipv6/af_inet6.c -@@ -1069,13 +1069,13 @@ static int __init inet6_init(void) - for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) - INIT_LIST_HEAD(r); - -+ raw_hashinfo_init(&raw_v6_hashinfo); -+ - if (disable_ipv6_mod) { - pr_info("Loaded, but administratively disabled, reboot required to enable\n"); - goto out; - } - -- raw_hashinfo_init(&raw_v6_hashinfo); -- - err = proto_register(&tcpv6_prot, 1); - if (err) - goto out; -diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c -index 0d9332e9cf71a..617f744a2e3a3 100644 ---- a/net/netfilter/nf_conntrack_ftp.c -+++ b/net/netfilter/nf_conntrack_ftp.c -@@ -33,6 +33,7 @@ MODULE_AUTHOR("Rusty Russell "); - MODULE_DESCRIPTION("ftp connection tracking helper"); - MODULE_ALIAS("ip_conntrack_ftp"); - MODULE_ALIAS_NFCT_HELPER(HELPER_NAME); -+static DEFINE_SPINLOCK(nf_ftp_lock); - - #define MAX_PORTS 8 - static u_int16_t ports[MAX_PORTS]; -@@ -409,7 +410,8 @@ static int help(struct sk_buff *skb, - } - datalen = skb->len - dataoff; - -- spin_lock_bh(&ct->lock); -+ /* seqadj (nat) uses ct->lock internally, nf_nat_ftp would cause deadlock */ -+ spin_lock_bh(&nf_ftp_lock); - fb_ptr = skb->data + dataoff; - - ends_in_nl = (fb_ptr[datalen - 1] == '\n'); -@@ -538,7 +540,7 @@ out_update_nl: - if (ends_in_nl) - update_nl_seq(ct, seq, ct_ftp_info, dir, skb); - out: -- spin_unlock_bh(&ct->lock); -+ spin_unlock_bh(&nf_ftp_lock); - return ret; - } - -diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c -index 992decbcaa5c1..5703846bea3b6 100644 ---- a/net/netfilter/nf_conntrack_irc.c -+++ b/net/netfilter/nf_conntrack_irc.c -@@ -157,15 +157,37 @@ static int help(struct sk_buff *skb, unsigned int protoff, - data = ib_ptr; - data_limit = ib_ptr + datalen; - -- /* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24 -- * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */ -- while (data < data_limit - (19 + MINMATCHLEN)) { -- if (memcmp(data, "\1DCC ", 5)) { -+ /* Skip any whitespace */ -+ while (data < data_limit - 10) { -+ if (*data == ' ' || *data == '\r' || *data == '\n') -+ data++; -+ else -+ break; -+ } -+ -+ /* strlen("PRIVMSG x ")=10 */ -+ if (data < data_limit - 10) { -+ if (strncasecmp("PRIVMSG ", data, 8)) -+ goto out; -+ data += 8; -+ } -+ -+ /* strlen(" :\1DCC SENT t AAAAAAAA P\1\n")=26 -+ * 7+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=26 -+ */ -+ while (data < data_limit - (21 + MINMATCHLEN)) { -+ /* Find first " :", the start of message */ -+ if (memcmp(data, " :", 2)) { - data++; - continue; - } -+ data += 2; -+ -+ /* then check that place only for the DCC command */ -+ if (memcmp(data, "\1DCC ", 5)) -+ goto out; - data += 5; -- /* we have at least (19+MINMATCHLEN)-5 bytes valid data left */ -+ /* we have at least (21+MINMATCHLEN)-(2+5) bytes valid data left */ - - iph = ip_hdr(skb); - pr_debug("DCC found in master %pI4:%u %pI4:%u\n", -@@ -181,7 +203,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, - pr_debug("DCC %s detected\n", dccprotos[i]); - - /* we have at least -- * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid -+ * (21+MINMATCHLEN)-7-dccprotos[i].matchlen bytes valid - * data left (== 14/13 bytes) */ - if (parse_dcc(data, data_limit, &dcc_ip, - &dcc_port, &addr_beg_p, &addr_end_p)) { -diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c -index b83dc9bf0a5dd..78fd9122b70c7 100644 ---- 
a/net/netfilter/nf_conntrack_sip.c -+++ b/net/netfilter/nf_conntrack_sip.c -@@ -477,7 +477,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, - return ret; - if (ret == 0) - break; -- dataoff += *matchoff; -+ dataoff = *matchoff; - } - *in_header = 0; - } -@@ -489,7 +489,7 @@ static int ct_sip_walk_headers(const struct nf_conn *ct, const char *dptr, - break; - if (ret == 0) - return ret; -- dataoff += *matchoff; -+ dataoff = *matchoff; - } - - if (in_header) -diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c -index 848cc81d69926..2fde193c3d26a 100644 ---- a/net/netfilter/nf_tables_api.c -+++ b/net/netfilter/nf_tables_api.c -@@ -2197,7 +2197,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, - struct netlink_ext_ack *extack) - { - const struct nlattr * const *nla = ctx->nla; -- struct nft_stats __percpu *stats = NULL; - struct nft_table *table = ctx->table; - struct nft_base_chain *basechain; - struct net *net = ctx->net; -@@ -2212,6 +2211,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, - return -EOVERFLOW; - - if (nla[NFTA_CHAIN_HOOK]) { -+ struct nft_stats __percpu *stats = NULL; - struct nft_chain_hook hook; - - if (flags & NFT_CHAIN_BINDING) -@@ -2243,8 +2243,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, - if (err < 0) { - nft_chain_release_hook(&hook); - kfree(basechain); -+ free_percpu(stats); - return err; - } -+ if (stats) -+ static_branch_inc(&nft_counters_enabled); - } else { - if (flags & NFT_CHAIN_BASE) - return -EINVAL; -@@ -2319,9 +2322,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, - goto err_unregister_hook; - } - -- if (stats) -- static_branch_inc(&nft_counters_enabled); -- - table->use++; - - return 0; -diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c -index 0fa2e20304272..ee6840bd59337 100644 ---- a/net/netfilter/nfnetlink_osf.c -+++ b/net/netfilter/nfnetlink_osf.c -@@ -269,6 +269,7 @@ bool nf_osf_find(const struct sk_buff *skb, - struct nf_osf_hdr_ctx ctx; - const struct tcphdr *tcp; - struct tcphdr _tcph; -+ bool found = false; - - memset(&ctx, 0, sizeof(ctx)); - -@@ -283,10 +284,11 @@ bool nf_osf_find(const struct sk_buff *skb, - - data->genre = f->genre; - data->version = f->version; -+ found = true; - break; - } - -- return true; -+ return found; - } - EXPORT_SYMBOL_GPL(nf_osf_find); - -diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c -index ac366c99086fd..7d7f7bac0216a 100644 ---- a/net/sched/cls_api.c -+++ b/net/sched/cls_api.c -@@ -2136,6 +2136,7 @@ replay: - } - - if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { -+ tfilter_put(tp, fh); - NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); - err = -EINVAL; - goto errout; -diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c -index 0b941dd63d268..86675a79da1e4 100644 ---- a/net/sched/sch_taprio.c -+++ b/net/sched/sch_taprio.c -@@ -67,6 +67,7 @@ struct taprio_sched { - u32 flags; - enum tk_offsets tk_offset; - int clockid; -+ bool offloaded; - atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ - * speeds it's sub-nanoseconds per byte - */ -@@ -1279,6 +1280,8 @@ static int taprio_enable_offload(struct net_device *dev, - goto done; - } - -+ q->offloaded = true; -+ - done: - taprio_offload_free(offload); - -@@ -1293,12 +1296,9 @@ static int taprio_disable_offload(struct net_device *dev, - struct tc_taprio_qopt_offload *offload; - int err; - -- if 
(!FULL_OFFLOAD_IS_ENABLED(q->flags)) -+ if (!q->offloaded) - return 0; - -- if (!ops->ndo_setup_tc) -- return -EOPNOTSUPP; -- - offload = taprio_offload_alloc(0); - if (!offload) { - NL_SET_ERR_MSG(extack, -@@ -1314,6 +1314,8 @@ static int taprio_disable_offload(struct net_device *dev, - goto out; - } - -+ q->offloaded = false; -+ - out: - taprio_offload_free(offload); - -@@ -1949,12 +1951,14 @@ start_error: - - static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) - { -- struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); -+ struct taprio_sched *q = qdisc_priv(sch); -+ struct net_device *dev = qdisc_dev(sch); -+ unsigned int ntx = cl - 1; - -- if (!dev_queue) -+ if (ntx >= dev->num_tx_queues) - return NULL; - -- return dev_queue->qdisc_sleeping; -+ return q->qdiscs[ntx]; - } - - static unsigned long taprio_find(struct Qdisc *sch, u32 classid) -diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c -index 1f3bb1f6b1f7b..8095876b66eb6 100644 ---- a/net/smc/smc_core.c -+++ b/net/smc/smc_core.c -@@ -2148,7 +2148,7 @@ static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr, - static int smcr_buf_map_usable_links(struct smc_link_group *lgr, - struct smc_buf_desc *buf_desc, bool is_rmb) - { -- int i, rc = 0; -+ int i, rc = 0, cnt = 0; - - /* protect against parallel link reconfiguration */ - mutex_lock(&lgr->llc_conf_mutex); -@@ -2161,9 +2161,12 @@ static int smcr_buf_map_usable_links(struct smc_link_group *lgr, - rc = -ENOMEM; - goto out; - } -+ cnt++; - } - out: - mutex_unlock(&lgr->llc_conf_mutex); -+ if (!rc && !cnt) -+ rc = -EINVAL; - return rc; - } - -diff --git a/scripts/Makefile.debug b/scripts/Makefile.debug -index 9f39b0130551f..8cf1cb22dd934 100644 ---- a/scripts/Makefile.debug -+++ b/scripts/Makefile.debug -@@ -1,20 +1,19 @@ - DEBUG_CFLAGS := -+debug-flags-y := -g - - ifdef CONFIG_DEBUG_INFO_SPLIT - DEBUG_CFLAGS += -gsplit-dwarf --else --DEBUG_CFLAGS += -g - endif - --ifndef CONFIG_AS_IS_LLVM --KBUILD_AFLAGS += -Wa,-gdwarf-2 --endif -- --ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT --dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4 --dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5 --DEBUG_CFLAGS += -gdwarf-$(dwarf-version-y) -+debug-flags-$(CONFIG_DEBUG_INFO_DWARF4) += -gdwarf-4 -+debug-flags-$(CONFIG_DEBUG_INFO_DWARF5) += -gdwarf-5 -+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_AS_IS_GNU),yy) -+# Clang does not pass -g or -gdwarf-* option down to GAS. -+# Add -Wa, prefix to explicitly specify the flags. 
-+KBUILD_AFLAGS += $(addprefix -Wa$(comma), $(debug-flags-y)) - endif -+DEBUG_CFLAGS += $(debug-flags-y) -+KBUILD_AFLAGS += $(debug-flags-y) - - ifdef CONFIG_DEBUG_INFO_REDUCED - DEBUG_CFLAGS += -fno-var-tracking -@@ -29,5 +28,5 @@ KBUILD_AFLAGS += -gz=zlib - KBUILD_LDFLAGS += --compress-debug-sections=zlib - endif - --KBUILD_CFLAGS += $(DEBUG_CFLAGS) -+KBUILD_CFLAGS += $(DEBUG_CFLAGS) - export DEBUG_CFLAGS -diff --git a/sound/core/init.c b/sound/core/init.c -index 726a8353201f8..4eacfafa41730 100644 ---- a/sound/core/init.c -+++ b/sound/core/init.c -@@ -178,10 +178,8 @@ int snd_card_new(struct device *parent, int idx, const char *xid, - return -ENOMEM; - - err = snd_card_init(card, parent, idx, xid, module, extra_size); -- if (err < 0) { -- kfree(card); -- return err; -- } -+ if (err < 0) -+ return err; /* card is freed by error handler */ - - *card_ret = card; - return 0; -@@ -231,7 +229,7 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid, - card->managed = true; - err = snd_card_init(card, parent, idx, xid, module, extra_size); - if (err < 0) { -- devres_free(card); -+ devres_free(card); /* in managed mode, we need to free manually */ - return err; - } - -@@ -293,6 +291,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent, - mutex_unlock(&snd_card_mutex); - dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n", - idx, snd_ecards_limit - 1, err); -+ if (!card->managed) -+ kfree(card); /* manually free here, as no destructor called */ - return err; - } - set_bit(idx, snd_cards_lock); /* lock it */ -diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c -index c572fb5886d5d..7af2515735957 100644 ---- a/sound/pci/hda/hda_bind.c -+++ b/sound/pci/hda/hda_bind.c -@@ -157,10 +157,10 @@ static int hda_codec_driver_remove(struct device *dev) - return codec->bus->core.ext_ops->hdev_detach(&codec->core); - } - -- refcount_dec(&codec->pcm_ref); - snd_hda_codec_disconnect_pcms(codec); - snd_hda_jack_tbl_disconnect(codec); -- wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref)); -+ if (!refcount_dec_and_test(&codec->pcm_ref)) -+ wait_event(codec->remove_sleep, !refcount_read(&codec->pcm_ref)); - snd_power_sync_ref(codec->bus->card); - - if (codec->patch_ops.free) -diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index b20694fd69dea..6f30c374f896e 100644 ---- a/sound/pci/hda/hda_intel.c -+++ b/sound/pci/hda/hda_intel.c -@@ -2550,6 +2550,8 @@ static const struct pci_device_id azx_ids[] = { - /* 5 Series/3400 */ - { PCI_DEVICE(0x8086, 0x3b56), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, -+ { PCI_DEVICE(0x8086, 0x3b57), -+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, - /* Poulsbo */ - { PCI_DEVICE(0x8086, 0x811b), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_BASE }, -diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c -index 6c209cd26c0ca..c9d9aa6351ecf 100644 ---- a/sound/pci/hda/patch_hdmi.c -+++ b/sound/pci/hda/patch_hdmi.c -@@ -170,6 +170,8 @@ struct hdmi_spec { - bool dyn_pcm_no_legacy; - /* hdmi interrupt trigger control flag for Nvidia codec */ - bool hdmi_intr_trig_ctrl; -+ bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */ -+ - bool intel_hsw_fixup; /* apply Intel platform-specific fixups */ - /* - * Non-generic VIA/NVIDIA specific -@@ -679,15 +681,24 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec, - int ca, int active_channels, - int conn_type) - { -+ struct hdmi_spec *spec = codec->spec; - 
union audio_infoframe ai; - - memset(&ai, 0, sizeof(ai)); -- if (conn_type == 0) { /* HDMI */ -+ if ((conn_type == 0) || /* HDMI */ -+ /* Nvidia DisplayPort: Nvidia HW expects same layout as HDMI */ -+ (conn_type == 1 && spec->nv_dp_workaround)) { - struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi; - -- hdmi_ai->type = 0x84; -- hdmi_ai->ver = 0x01; -- hdmi_ai->len = 0x0a; -+ if (conn_type == 0) { /* HDMI */ -+ hdmi_ai->type = 0x84; -+ hdmi_ai->ver = 0x01; -+ hdmi_ai->len = 0x0a; -+ } else {/* Nvidia DP */ -+ hdmi_ai->type = 0x84; -+ hdmi_ai->ver = 0x1b; -+ hdmi_ai->len = 0x11 << 2; -+ } - hdmi_ai->CC02_CT47 = active_channels - 1; - hdmi_ai->CA = ca; - hdmi_checksum_audio_infoframe(hdmi_ai); -@@ -3617,6 +3628,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec) - spec->pcm_playback.rates = SUPPORTED_RATES; - spec->pcm_playback.maxbps = SUPPORTED_MAXBPS; - spec->pcm_playback.formats = SUPPORTED_FORMATS; -+ spec->nv_dp_workaround = true; - return 0; - } - -@@ -3756,6 +3768,7 @@ static int patch_nvhdmi(struct hda_codec *codec) - spec->chmap.ops.chmap_cea_alloc_validate_get_type = - nvhdmi_chmap_cea_alloc_validate_get_type; - spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; -+ spec->nv_dp_workaround = true; - - codec->link_down_at_suspend = 1; - -@@ -3779,6 +3792,7 @@ static int patch_nvhdmi_legacy(struct hda_codec *codec) - spec->chmap.ops.chmap_cea_alloc_validate_get_type = - nvhdmi_chmap_cea_alloc_validate_get_type; - spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; -+ spec->nv_dp_workaround = true; - - codec->link_down_at_suspend = 1; - -@@ -3984,6 +3998,7 @@ static int tegra_hdmi_init(struct hda_codec *codec) - - generic_hdmi_init_per_pins(codec); - -+ codec->depop_delay = 10; - codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; - spec->chmap.ops.chmap_cea_alloc_validate_get_type = - nvhdmi_chmap_cea_alloc_validate_get_type; -@@ -3992,6 +4007,7 @@ static int tegra_hdmi_init(struct hda_codec *codec) - spec->chmap.ops.chmap_cea_alloc_validate_get_type = - nvhdmi_chmap_cea_alloc_validate_get_type; - spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; -+ spec->nv_dp_workaround = true; - - return 0; - } -diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c -index 799f6bf266dd0..9614b63415a8e 100644 ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -7037,6 +7037,8 @@ enum { - ALC294_FIXUP_ASUS_GU502_HP, - ALC294_FIXUP_ASUS_GU502_PINS, - ALC294_FIXUP_ASUS_GU502_VERBS, -+ ALC294_FIXUP_ASUS_G513_PINS, -+ ALC285_FIXUP_ASUS_G533Z_PINS, - ALC285_FIXUP_HP_GPIO_LED, - ALC285_FIXUP_HP_MUTE_LED, - ALC236_FIXUP_HP_GPIO_LED, -@@ -8374,6 +8376,24 @@ static const struct hda_fixup alc269_fixups[] = { - [ALC294_FIXUP_ASUS_GU502_HP] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc294_fixup_gu502_hp, -+ }, -+ [ALC294_FIXUP_ASUS_G513_PINS] = { -+ .type = HDA_FIXUP_PINS, -+ .v.pins = (const struct hda_pintbl[]) { -+ { 0x19, 0x03a11050 }, /* front HP mic */ -+ { 0x1a, 0x03a11c30 }, /* rear external mic */ -+ { 0x21, 0x03211420 }, /* front HP out */ -+ { } -+ }, -+ }, -+ [ALC285_FIXUP_ASUS_G533Z_PINS] = { -+ .type = HDA_FIXUP_PINS, -+ .v.pins = (const struct hda_pintbl[]) { -+ { 0x14, 0x90170120 }, -+ { } -+ }, -+ .chained = true, -+ .chain_id = ALC294_FIXUP_ASUS_G513_PINS, - }, - [ALC294_FIXUP_ASUS_COEF_1B] = { - .type = HDA_FIXUP_VERBS, -@@ -9114,6 +9134,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), - SND_PCI_QUIRK(0x1028, 0x0872, "Dell 
Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), - SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), -+ SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK), - SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB), -@@ -9130,6 +9151,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x1028, 0x0a9d, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0a9e, "Dell Latitude 5430", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0b19, "Dell XPS 15 9520", ALC289_FIXUP_DUAL_SPK), -+ SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK), - SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), -@@ -9257,6 +9279,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), - SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), - SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED), -+ SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED), - SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8972, "HP EliteBook 840 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), -@@ -9304,10 +9327,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), - SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), - SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), -+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), -+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), - SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), - SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), -- SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), - SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), - SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), - SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), -@@ -9323,14 +9347,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), - SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), -+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), - SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), -+ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), - SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS 
Zephyrus G15", ALC289_FIXUP_ASUS_GA502), - SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), -+ SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS), - SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), -+ SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), -- SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), -- SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), - SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), - SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), - SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), -@@ -9532,6 +9558,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { - SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), - SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), - SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), -+ SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20), - SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI), - SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101), -diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c -index ff2aa13b7b26f..5d105c44b46df 100644 ---- a/sound/usb/endpoint.c -+++ b/sound/usb/endpoint.c -@@ -758,8 +758,7 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip, - * The endpoint needs to be closed via snd_usb_endpoint_close() later. - * - * Note that this function doesn't configure the endpoint. The substream -- * needs to set it up later via snd_usb_endpoint_set_params() and -- * snd_usb_endpoint_prepare(). -+ * needs to set it up later via snd_usb_endpoint_configure(). - */ - struct snd_usb_endpoint * - snd_usb_endpoint_open(struct snd_usb_audio *chip, -@@ -1293,13 +1292,12 @@ out_of_memory: - /* - * snd_usb_endpoint_set_params: configure an snd_usb_endpoint - * -- * It's called either from hw_params callback. - * Determine the number of URBs to be used on this endpoint. - * An endpoint must be configured before it can be started. - * An endpoint that is already running can not be reconfigured. - */ --int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep) -+static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep) - { - const struct audioformat *fmt = ep->cur_audiofmt; - int err; -@@ -1382,18 +1380,18 @@ static int init_sample_rate(struct snd_usb_audio *chip, - } - - /* -- * snd_usb_endpoint_prepare: Prepare the endpoint -+ * snd_usb_endpoint_configure: Configure the endpoint - * - * This function sets up the EP to be fully usable state. -- * It's called either from prepare callback. -+ * It's called either from hw_params or prepare callback. - * The function checks need_setup flag, and performs nothing unless needed, - * so it's safe to call this multiple times. - * - * This returns zero if unchanged, 1 if the configuration has changed, - * or a negative error code. 
- */ --int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep) -+int snd_usb_endpoint_configure(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep) - { - bool iface_first; - int err = 0; -@@ -1414,6 +1412,9 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, - if (err < 0) - goto unlock; - } -+ err = snd_usb_endpoint_set_params(chip, ep); -+ if (err < 0) -+ goto unlock; - goto done; - } - -@@ -1441,6 +1442,10 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, - if (err < 0) - goto unlock; - -+ err = snd_usb_endpoint_set_params(chip, ep); -+ if (err < 0) -+ goto unlock; -+ - err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt); - if (err < 0) - goto unlock; -diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h -index e67ea28faa54f..6a9af04cf175a 100644 ---- a/sound/usb/endpoint.h -+++ b/sound/usb/endpoint.h -@@ -17,10 +17,8 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip, - bool is_sync_ep); - void snd_usb_endpoint_close(struct snd_usb_audio *chip, - struct snd_usb_endpoint *ep); --int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep); --int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep); -+int snd_usb_endpoint_configure(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep); - int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock); - - bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip, -diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c -index 02035b545f9dd..e692ae04436a5 100644 ---- a/sound/usb/pcm.c -+++ b/sound/usb/pcm.c -@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip, - if (stop_endpoints(subs, false)) - sync_pending_stops(subs); - if (subs->sync_endpoint) { -- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint); -+ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint); - if (err < 0) - return err; - } -- err = snd_usb_endpoint_prepare(chip, subs->data_endpoint); -+ err = snd_usb_endpoint_configure(chip, subs->data_endpoint); - if (err < 0) - return err; - snd_usb_set_format_quirk(subs, subs->cur_audiofmt); - } else { - if (subs->sync_endpoint) { -- err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint); -+ err = snd_usb_endpoint_configure(chip, subs->sync_endpoint); - if (err < 0) - return err; - } -@@ -551,13 +551,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, - subs->cur_audiofmt = fmt; - mutex_unlock(&chip->mutex); - -- if (subs->sync_endpoint) { -- ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint); -- if (ret < 0) -- goto unlock; -- } -- -- ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint); -+ ret = configure_endpoints(chip, subs); - - unlock: - if (ret < 0) -diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c -index 6b1bafe267a42..8ec5b9f344e02 100644 ---- a/tools/lib/perf/evlist.c -+++ b/tools/lib/perf/evlist.c -@@ -441,6 +441,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - - perf_evlist__for_each_entry(evlist, evsel) { - bool overwrite = evsel->attr.write_backward; -+ enum fdarray_flags flgs; - struct perf_mmap *map; - int *output, fd, cpu; - -@@ -504,8 +505,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - - revent = !overwrite ? POLLIN : 0; - -- if (!evsel->system_wide && -- perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) { -+ flgs = evsel->system_wide ? 
fdarray_flag__nonfilterable : fdarray_flag__default; -+ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) { - perf_mmap__put(map); - return -1; - } -diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c -index 63b9db6574425..97c69a249c6e4 100644 ---- a/tools/perf/util/bpf_counter_cgroup.c -+++ b/tools/perf/util/bpf_counter_cgroup.c -@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist) - - perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) { - link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch, -- FD(cgrp_switch, cpu.cpu)); -+ FD(cgrp_switch, i)); - if (IS_ERR(link)) { - pr_err("Failed to attach cgroup program\n"); - err = PTR_ERR(link); -@@ -123,7 +123,7 @@ static int bperf_load_program(struct evlist *evlist) - - map_fd = bpf_map__fd(skel->maps.events); - perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) { -- int fd = FD(evsel, cpu.cpu); -+ int fd = FD(evsel, j); - __u32 idx = evsel->core.idx * total_cpus + cpu.cpu; - - err = bpf_map_update_elem(map_fd, &idx, &fd, -diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c -index 292c430768b52..c72f8ad96f751 100644 ---- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c -+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c -@@ -176,7 +176,7 @@ static int bperf_cgroup_count(void) - } - - // This will be attached to cgroup-switches event for each cpu --SEC("perf_events") -+SEC("perf_event") - int BPF_PROG(on_cgrp_switch) - { - return bperf_cgroup_count(); -diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c -index 953338b9e887e..02cd9f75e3d2f 100644 ---- a/tools/perf/util/genelf.c -+++ b/tools/perf/util/genelf.c -@@ -251,6 +251,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, - Elf_Data *d; - Elf_Scn *scn; - Elf_Ehdr *ehdr; -+ Elf_Phdr *phdr; - Elf_Shdr *shdr; - uint64_t eh_frame_base_offset; - char *strsym = NULL; -@@ -285,6 +286,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, - ehdr->e_version = EV_CURRENT; - ehdr->e_shstrndx= unwinding ? 
4 : 2; /* shdr index for section name */ - -+ /* -+ * setup program header -+ */ -+ phdr = elf_newphdr(e, 1); -+ phdr[0].p_type = PT_LOAD; -+ phdr[0].p_offset = 0; -+ phdr[0].p_vaddr = 0; -+ phdr[0].p_paddr = 0; -+ phdr[0].p_filesz = csize; -+ phdr[0].p_memsz = csize; -+ phdr[0].p_flags = PF_X | PF_R; -+ phdr[0].p_align = 8; -+ - /* - * setup text section - */ -diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h -index ae138afe6c563..b5c909546e3f2 100644 ---- a/tools/perf/util/genelf.h -+++ b/tools/perf/util/genelf.h -@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent - - #if GEN_ELF_CLASS == ELFCLASS64 - #define elf_newehdr elf64_newehdr -+#define elf_newphdr elf64_newphdr - #define elf_getshdr elf64_getshdr - #define Elf_Ehdr Elf64_Ehdr -+#define Elf_Phdr Elf64_Phdr - #define Elf_Shdr Elf64_Shdr - #define Elf_Sym Elf64_Sym - #define ELF_ST_TYPE(a) ELF64_ST_TYPE(a) -@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent - #define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a) - #else - #define elf_newehdr elf32_newehdr -+#define elf_newphdr elf32_newphdr - #define elf_getshdr elf32_getshdr - #define Elf_Ehdr Elf32_Ehdr -+#define Elf_Phdr Elf32_Phdr - #define Elf_Shdr Elf32_Shdr - #define Elf_Sym Elf32_Sym - #define ELF_ST_TYPE(a) ELF32_ST_TYPE(a) -diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c -index 75bec32d4f571..647b7dff8ef36 100644 ---- a/tools/perf/util/symbol-elf.c -+++ b/tools/perf/util/symbol-elf.c -@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, - * unusual. One significant peculiarity is that the mapping (start -> pgoff) - * is not the same for the kernel map and the modules map. That happens because - * the data is copied adjacently whereas the original kcore has gaps. Finally, -- * kallsyms and modules files are compared with their copies to check that -- * modules have not been loaded or unloaded while the copies were taking place. -+ * kallsyms file is compared with its copy to check that modules have not been -+ * loaded or unloaded while the copies were taking place. - * - * Return: %0 on success, %-1 on failure. - */ -@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir) - goto out_extract_close; - } - -- if (kcore_copy__compare_file(from_dir, to_dir, "modules")) -- goto out_extract_close; -- - if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms")) - goto out_extract_close; - -diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c -index 84d17bd4efaed..64e273b2b1b21 100644 ---- a/tools/perf/util/synthetic-events.c -+++ b/tools/perf/util/synthetic-events.c -@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event, - bool is_kernel) - { - struct build_id bid; -+ struct nsinfo *nsi; -+ struct nscookie nc; - int rc; - -- if (is_kernel) -+ if (is_kernel) { - rc = sysfs__read_build_id("/sys/kernel/notes", &bid); -- else -- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1; -+ goto out; -+ } -+ -+ nsi = nsinfo__new(event->pid); -+ nsinfo__mountns_enter(nsi, &nc); - -+ rc = filename__read_build_id(event->filename, &bid) > 0 ? 
0 : -1; -+ -+ nsinfo__mountns_exit(&nc); -+ nsinfo__put(nsi); -+ -+out: - if (rc == 0) { - memcpy(event->build_id, bid.data, sizeof(bid.data)); - event->build_id_size = (u8) bid.size; -diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh -index e714bae473fb4..81f31179ac887 100755 ---- a/tools/testing/selftests/net/forwarding/sch_red.sh -+++ b/tools/testing/selftests/net/forwarding/sch_red.sh -@@ -1,3 +1,4 @@ -+#!/bin/bash - # SPDX-License-Identifier: GPL-2.0 - - # This test sends one stream of traffic from H1 through a TBF shaper, to a RED diff --git a/sys-kernel/pinephone-sources/files/5.19.8-9.patch b/sys-kernel/pinephone-sources/files/5.19.8-9.patch deleted file mode 100644 index f12fb56..0000000 --- a/sys-kernel/pinephone-sources/files/5.19.8-9.patch +++ /dev/null @@ -1,8234 +0,0 @@ -diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst -index 33b04db8408f9..fda97b3fcf018 100644 ---- a/Documentation/arm64/silicon-errata.rst -+++ b/Documentation/arm64/silicon-errata.rst -@@ -52,6 +52,8 @@ stable kernels. - | Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 | - +----------------+-----------------+-----------------+-----------------------------+ - +----------------+-----------------+-----------------+-----------------------------+ -+| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 | -++----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 | - +----------------+-----------------+-----------------+-----------------------------+ - | ARM | Cortex-A510 | #2038923 | ARM64_ERRATUM_2038923 | -diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst -index 78ca69eda8778..02f4ad314a1eb 100644 ---- a/Documentation/hwmon/asus_ec_sensors.rst -+++ b/Documentation/hwmon/asus_ec_sensors.rst -@@ -13,12 +13,16 @@ Supported boards: - * ROG CROSSHAIR VIII FORMULA - * ROG CROSSHAIR VIII HERO - * ROG CROSSHAIR VIII IMPACT -+ * ROG MAXIMUS XI HERO -+ * ROG MAXIMUS XI HERO (WI-FI) - * ROG STRIX B550-E GAMING - * ROG STRIX B550-I GAMING - * ROG STRIX X570-E GAMING - * ROG STRIX X570-E GAMING WIFI II - * ROG STRIX X570-F GAMING - * ROG STRIX X570-I GAMING -+ * ROG STRIX Z690-A GAMING WIFI D4 -+ * ROG ZENITH II EXTREME - - Authors: - - Eugene Shalygin -diff --git a/Makefile b/Makefile -index e361c6230e9e5..1f27c4bd09e67 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 19 --SUBLEVEL = 8 -+SUBLEVEL = 9 - EXTRAVERSION = - NAME = Superb Owl - -@@ -1286,8 +1286,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj - - PHONY += headers - headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts -- $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \ -- $(error Headers not exportable for the $(SRCARCH) architecture)) -+ $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML)) - $(Q)$(MAKE) $(hdr-inst)=include/uapi - $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi - -diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi -index ba621783acdbc..d6f364c6be94b 100644 ---- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi -+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi -@@ -76,8 +76,8 @@ - regulators { - vdd_3v3: VDD_IO { - regulator-name = "VDD_IO"; -- regulator-min-microvolt = <1200000>; -- 
regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -95,8 +95,8 @@ - - vddio_ddr: VDD_DDR { - regulator-name = "VDD_DDR"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1200000>; -+ regulator-max-microvolt = <1200000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -118,8 +118,8 @@ - - vdd_core: VDD_CORE { - regulator-name = "VDD_CORE"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1250000>; -+ regulator-max-microvolt = <1250000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -160,8 +160,8 @@ - - LDO1 { - regulator-name = "LDO1"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-always-on; - - regulator-state-standby { -@@ -175,9 +175,8 @@ - - LDO2 { - regulator-name = "LDO2"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -- regulator-always-on; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <3300000>; - - regulator-state-standby { - regulator-on-in-suspend; -diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts -index 164201a8fbf2d..492456e195a37 100644 ---- a/arch/arm/boot/dts/at91-sama5d2_icp.dts -+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts -@@ -197,8 +197,8 @@ - regulators { - vdd_io_reg: VDD_IO { - regulator-name = "VDD_IO"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -216,8 +216,8 @@ - - VDD_DDR { - regulator-name = "VDD_DDR"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1350000>; -+ regulator-max-microvolt = <1350000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -235,8 +235,8 @@ - - VDD_CORE { - regulator-name = "VDD_CORE"; -- regulator-min-microvolt = <600000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1250000>; -+ regulator-max-microvolt = <1250000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -258,7 +258,6 @@ - regulator-max-microvolt = <1850000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; -- regulator-always-on; - - regulator-state-standby { - regulator-on-in-suspend; -@@ -273,8 +272,8 @@ - - LDO1 { - regulator-name = "LDO1"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <2500000>; -+ regulator-max-microvolt = <2500000>; - regulator-always-on; - - regulator-state-standby { -@@ -288,8 +287,8 @@ - - LDO2 { - regulator-name = "LDO2"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-always-on; - - regulator-state-standby { -diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts -index 103544620fd7c..b261b4da08502 100644 ---- 
a/arch/arm/boot/dts/at91-sama7g5ek.dts -+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts -@@ -244,8 +244,8 @@ - regulators { - vdd_3v3: VDD_IO { - regulator-name = "VDD_IO"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <3300000>; -+ regulator-max-microvolt = <3300000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -264,8 +264,8 @@ - - vddioddr: VDD_DDR { - regulator-name = "VDD_DDR"; -- regulator-min-microvolt = <1300000>; -- regulator-max-microvolt = <1450000>; -+ regulator-min-microvolt = <1350000>; -+ regulator-max-microvolt = <1350000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -285,8 +285,8 @@ - - vddcore: VDD_CORE { - regulator-name = "VDD_CORE"; -- regulator-min-microvolt = <1100000>; -- regulator-max-microvolt = <1850000>; -+ regulator-min-microvolt = <1150000>; -+ regulator-max-microvolt = <1150000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-always-on; -@@ -306,7 +306,7 @@ - vddcpu: VDD_OTHER { - regulator-name = "VDD_OTHER"; - regulator-min-microvolt = <1050000>; -- regulator-max-microvolt = <1850000>; -+ regulator-max-microvolt = <1250000>; - regulator-initial-mode = <2>; - regulator-allowed-modes = <2>, <4>; - regulator-ramp-delay = <3125>; -@@ -326,8 +326,8 @@ - - vldo1: LDO1 { - regulator-name = "LDO1"; -- regulator-min-microvolt = <1200000>; -- regulator-max-microvolt = <3700000>; -+ regulator-min-microvolt = <1800000>; -+ regulator-max-microvolt = <1800000>; - regulator-always-on; - - regulator-state-standby { -diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi -index 095c9143d99a3..6b791d515e294 100644 ---- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi -@@ -51,16 +51,6 @@ - vin-supply = <®_3p3v_s5>; - }; - -- reg_3p3v_s0: regulator-3p3v-s0 { -- compatible = "regulator-fixed"; -- regulator-name = "V_3V3_S0"; -- regulator-min-microvolt = <3300000>; -- regulator-max-microvolt = <3300000>; -- regulator-always-on; -- regulator-boot-on; -- vin-supply = <®_3p3v_s5>; -- }; -- - reg_3p3v_s5: regulator-3p3v-s5 { - compatible = "regulator-fixed"; - regulator-name = "V_3V3_S5"; -@@ -259,7 +249,7 @@ - - /* default boot source: workaround #1 for errata ERR006282 */ - smarc_flash: flash@0 { -- compatible = "winbond,w25q16dw", "jedec,spi-nor"; -+ compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <20000000>; - }; -diff --git a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi -index a1676b5d2980f..c5a98b0110dd3 100644 ---- a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi -+++ b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi -@@ -28,7 +28,7 @@ - enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>; - }; - -- backlight_led: backlight_led { -+ backlight_led: backlight-led { - compatible = "pwm-backlight"; - pwms = <&pwm3 0 5000000 0>; - brightness-levels = <0 16 64 255>; -diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c -index df6d673e83d56..f4501dea98b04 100644 ---- a/arch/arm/mach-at91/pm.c -+++ b/arch/arm/mach-at91/pm.c -@@ -541,9 +541,41 @@ extern u32 at91_pm_suspend_in_sram_sz; - - static int at91_suspend_finish(unsigned long val) - { -+ unsigned char modified_gray_code[] = { -+ 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d, -+ 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b, -+ 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 
0x12, 0x13, -+ 0x10, 0x11, -+ }; -+ unsigned int tmp, index; - int i; - - if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) { -+ /* -+ * Bootloader will perform DDR recalibration and will try to -+ * restore the ZQ0SR0 with the value saved here. But the -+ * calibration is buggy and restoring some values from ZQ0SR0 -+ * is forbidden and risky thus we need to provide processed -+ * values for these (modified gray code values). -+ */ -+ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0); -+ -+ /* Store pull-down output impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index]; -+ -+ /* Store pull-up output impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; -+ -+ /* Store pull-down on-die termination impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; -+ -+ /* Store pull-up on-die termination impedance select. */ -+ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f; -+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; -+ - /* - * The 1st 8 words of memory might get corrupted in the process - * of DDR PHY recalibration; it is saved here in securam and it -@@ -1066,10 +1098,6 @@ static int __init at91_pm_backup_init(void) - of_scan_flat_dt(at91_pm_backup_scan_memcs, &located); - if (!located) - goto securam_fail; -- -- /* DDR3PHY_ZQ0SR0 */ -- soc_pm.bu->ddr_phy_calibration[0] = readl(soc_pm.data.ramc_phy + -- 0x188); - } - - return 0; -diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S -index abe4ced33edaf..ffed4d9490428 100644 ---- a/arch/arm/mach-at91/pm_suspend.S -+++ b/arch/arm/mach-at91/pm_suspend.S -@@ -172,9 +172,15 @@ sr_ena_2: - /* Put DDR PHY's DLL in bypass mode for non-backup modes. */ - cmp r7, #AT91_PM_BACKUP - beq sr_ena_3 -- ldr tmp1, [r3, #DDR3PHY_PIR] -- orr tmp1, tmp1, #DDR3PHY_PIR_DLLBYP -- str tmp1, [r3, #DDR3PHY_PIR] -+ -+ /* Disable DX DLLs. */ -+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ -+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR] -+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX1DLLCR] - - sr_ena_3: - /* Power down DDR PHY data receivers. */ -@@ -221,10 +227,14 @@ sr_ena_3: - bic tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0 - str tmp1, [r3, #DDR3PHY_DSGCR] - -- /* Take DDR PHY's DLL out of bypass mode. */ -- ldr tmp1, [r3, #DDR3PHY_PIR] -- bic tmp1, tmp1, #DDR3PHY_PIR_DLLBYP -- str tmp1, [r3, #DDR3PHY_PIR] -+ /* Enable DX DLLs. */ -+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX0DLLCR] -+ -+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR] -+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS -+ str tmp1, [r3, #DDR3PHY_DX1DLLCR] - - /* Enable quasi-dynamic programming. */ - mov tmp1, #0 -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 001eaba5a6b4b..cc1e7bb49d38b 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -914,6 +914,23 @@ config ARM64_ERRATUM_1902691 - - If unsure, say Y. - -+config ARM64_ERRATUM_2457168 -+ bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly" -+ depends on ARM64_AMU_EXTN -+ default y -+ help -+ This option adds the workaround for ARM Cortex-A510 erratum 2457168. 
-+ -+ The AMU counter AMEVCNTR01 (constant counter) should increment at the same rate -+ as the system counter. On affected Cortex-A510 cores AMEVCNTR01 increments -+ incorrectly giving a significantly higher output value. -+ -+ Work around this problem by returning 0 when reading the affected counter in -+ key locations that results in disabling all users of this counter. This effect -+ is the same to firmware disabling affected counters. -+ -+ If unsure, say Y. -+ - config CAVIUM_ERRATUM_22375 - bool "Cavium erratum 22375, 24313" - default y -@@ -1867,6 +1884,8 @@ config ARM64_BTI_KERNEL - depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 - depends on !CC_IS_GCC || GCC_VERSION >= 100100 -+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671 -+ depends on !CC_IS_GCC - # https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9 - depends on !CC_IS_CLANG || CLANG_VERSION >= 120000 - depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) -diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts -index 40d34c8384a5e..b949cac037427 100644 ---- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts -+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts -@@ -25,7 +25,6 @@ - &enetc_port0 { - phy-handle = <&slot1_sgmii>; - phy-mode = "2500base-x"; -- managed = "in-band-status"; - status = "okay"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts -index 24737e89038a4..96cac0f969a77 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts -@@ -626,24 +626,28 @@ - lan1: port@0 { - reg = <0>; - label = "lan1"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - lan2: port@1 { - reg = <1>; - label = "lan2"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - lan3: port@2 { - reg = <2>; - label = "lan3"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - - lan4: port@3 { - reg = <3>; - label = "lan4"; -+ phy-mode = "internal"; - local-mac-address = [00 00 00 00 00 00]; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi -index eafa88d980b32..c2d4da25482ff 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi -@@ -32,10 +32,10 @@ - }; - - /* Fixed clock dedicated to SPI CAN controller */ -- clk20m: oscillator { -+ clk40m: oscillator { - compatible = "fixed-clock"; - #clock-cells = <0>; -- clock-frequency = <20000000>; -+ clock-frequency = <40000000>; - }; - - gpio-keys { -@@ -194,8 +194,8 @@ - - can1: can@0 { - compatible = "microchip,mcp251xfd"; -- clocks = <&clk20m>; -- interrupts-extended = <&gpio1 6 IRQ_TYPE_EDGE_FALLING>; -+ clocks = <&clk40m>; -+ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_LOW>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_can1_int>; - reg = <0>; -@@ -595,7 +595,7 @@ - pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>; - reg = <0x4a>; - /* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */ -- reset-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>; -+ reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; - status = "disabled"; - }; - -@@ -737,6 +737,7 @@ - }; - - &usbphynop2 { -+ power-domains = <&pgc_otg2>; - vcc-supply = <®_vdd_3v3>; - }; - -diff --git 
a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts -index 521215520a0f4..6630ec561dc25 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts -+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts -@@ -770,10 +770,10 @@ - - pinctrl_sai2: sai2grp { - fsl,pins = < -- MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC -- MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 -- MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK -- MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK -+ MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC 0xd6 -+ MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 0xd6 -+ MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK 0xd6 -+ MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK 0xd6 - >; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi -index fb17e329cd370..f5323291a9b24 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi -@@ -620,7 +620,7 @@ - interrupts = <5 IRQ_TYPE_EDGE_FALLING>; - reg = <0x4a>; - /* Verdin GPIO_2 (SODIMM 208) */ -- reset-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>; -+ reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; - status = "disabled"; - }; - }; -@@ -697,7 +697,7 @@ - pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>; - reg = <0x4a>; - /* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */ -- reset-gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>; -+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; - status = "disabled"; - }; - -diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi -index 899e8e7dbc24f..802ad6e5cef61 100644 ---- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi -+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi -@@ -204,7 +204,6 @@ - reg = <0x51>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_rtc>; -- interrupt-names = "irq"; - interrupt-parent = <&gpio1>; - interrupts = <1 IRQ_TYPE_EDGE_FALLING>; - quartz-load-femtofarads = <7000>; -diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi -index 7cbb0de060ddc..1c15726cff8bf 100644 ---- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi -+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi -@@ -85,7 +85,7 @@ - "renesas,rcar-gen4-hscif", - "renesas,hscif"; - reg = <0 0xe6540000 0 96>; -- interrupts = ; -+ interrupts = ; - clocks = <&cpg CPG_MOD 514>, - <&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>, - <&scif_clk>; -diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c -index 5f4117dae8888..af137f91607da 100644 ---- a/arch/arm64/kernel/cpu_errata.c -+++ b/arch/arm64/kernel/cpu_errata.c -@@ -656,6 +656,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { - ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2) - }, - #endif -+#ifdef CONFIG_ARM64_ERRATUM_2457168 -+ { -+ .desc = "ARM erratum 2457168", -+ .capability = ARM64_WORKAROUND_2457168, -+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, -+ -+ /* Cortex-A510 r0p0-r1p1 */ -+ CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1) -+ }, -+#endif - #ifdef CONFIG_ARM64_ERRATUM_2038923 - { - .desc = "ARM erratum 2038923", -diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c -index ebdfbd1cf207b..f34c9f8b9ee0a 100644 ---- a/arch/arm64/kernel/cpufeature.c -+++ b/arch/arm64/kernel/cpufeature.c -@@ -1798,7 +1798,10 @@ static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) - pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", - 
smp_processor_id()); - cpumask_set_cpu(smp_processor_id(), &amu_cpus); -- update_freq_counters_refs(); -+ -+ /* 0 reference values signal broken/disabled counters */ -+ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) -+ update_freq_counters_refs(); - } - } - -diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c -index af5df48ba915b..2e248342476ea 100644 ---- a/arch/arm64/kernel/hibernate.c -+++ b/arch/arm64/kernel/hibernate.c -@@ -300,6 +300,11 @@ static void swsusp_mte_restore_tags(void) - unsigned long pfn = xa_state.xa_index; - struct page *page = pfn_to_online_page(pfn); - -+ /* -+ * It is not required to invoke page_kasan_tag_reset(page) -+ * at this point since the tags stored in page->flags are -+ * already restored. -+ */ - mte_restore_page_tags(page_address(page), tags); - - mte_free_tag_storage(tags); -diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c -index b2b730233274b..f6b00743c3994 100644 ---- a/arch/arm64/kernel/mte.c -+++ b/arch/arm64/kernel/mte.c -@@ -48,6 +48,15 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte, - if (!pte_is_tagged) - return; - -+ page_kasan_tag_reset(page); -+ /* -+ * We need smp_wmb() in between setting the flags and clearing the -+ * tags because if another thread reads page->flags and builds a -+ * tagged address out of it, there is an actual dependency to the -+ * memory access, but on the current thread we do not guarantee that -+ * the new page->flags are visible before the tags were updated. -+ */ -+ smp_wmb(); - mte_clear_page_tags(page_address(page)); - } - -diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c -index 9ab78ad826e2a..707b5451929d4 100644 ---- a/arch/arm64/kernel/topology.c -+++ b/arch/arm64/kernel/topology.c -@@ -310,12 +310,25 @@ core_initcall(init_amu_fie); - - static void cpu_read_corecnt(void *val) - { -+ /* -+ * A value of 0 can be returned if the current CPU does not support AMUs -+ * or if the counter is disabled for this CPU. A return value of 0 at -+ * counter read is properly handled as an error case by the users of the -+ * counter. -+ */ - *(u64 *)val = read_corecnt(); - } - - static void cpu_read_constcnt(void *val) - { -- *(u64 *)val = read_constcnt(); -+ /* -+ * Return 0 if the current CPU is affected by erratum 2457168. A value -+ * of 0 is also returned if the current CPU does not support AMUs or if -+ * the counter is disabled. A return value of 0 at counter read is -+ * properly handled as an error case by the users of the counter. -+ */ -+ *(u64 *)val = this_cpu_has_cap(ARM64_WORKAROUND_2457168) ? -+ 0UL : read_constcnt(); - } - - static inline -@@ -342,7 +355,22 @@ int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) - */ - bool cpc_ffh_supported(void) - { -- return freq_counters_valid(get_cpu_with_amu_feat()); -+ int cpu = get_cpu_with_amu_feat(); -+ -+ /* -+ * FFH is considered supported if there is at least one present CPU that -+ * supports AMUs. Using FFH to read core and reference counters for CPUs -+ * that do not support AMUs, have counters disabled or that are affected -+ * by errata, will result in a return value of 0. -+ * -+ * This is done to allow any enabled and valid counters to be read -+ * through FFH, knowing that potentially returning 0 as counter value is -+ * properly handled by the users of these counters. 
-+ */ -+ if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) -+ return false; -+ -+ return true; - } - - int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) -diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c -index 24913271e898c..0dea80bf6de46 100644 ---- a/arch/arm64/mm/copypage.c -+++ b/arch/arm64/mm/copypage.c -@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from) - - if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { - set_bit(PG_mte_tagged, &to->flags); -+ page_kasan_tag_reset(to); -+ /* -+ * We need smp_wmb() in between setting the flags and clearing the -+ * tags because if another thread reads page->flags and builds a -+ * tagged address out of it, there is an actual dependency to the -+ * memory access, but on the current thread we do not guarantee that -+ * the new page->flags are visible before the tags were updated. -+ */ -+ smp_wmb(); - mte_copy_page_tags(kto, kfrom); - } - } -diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c -index 4334dec93bd44..a9e50e930484a 100644 ---- a/arch/arm64/mm/mteswap.c -+++ b/arch/arm64/mm/mteswap.c -@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page) - if (!tags) - return false; - -+ page_kasan_tag_reset(page); -+ /* -+ * We need smp_wmb() in between setting the flags and clearing the -+ * tags because if another thread reads page->flags and builds a -+ * tagged address out of it, there is an actual dependency to the -+ * memory access, but on the current thread we do not guarantee that -+ * the new page->flags are visible before the tags were updated. -+ */ -+ smp_wmb(); - mte_restore_page_tags(page_address(page), tags); - - return true; -diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps -index 8809e14cf86a2..18999f46df19f 100644 ---- a/arch/arm64/tools/cpucaps -+++ b/arch/arm64/tools/cpucaps -@@ -66,6 +66,7 @@ WORKAROUND_1902691 - WORKAROUND_2038923 - WORKAROUND_2064142 - WORKAROUND_2077057 -+WORKAROUND_2457168 - WORKAROUND_TRBE_OVERWRITE_FILL_MODE - WORKAROUND_TSB_FLUSH_FAILURE - WORKAROUND_TRBE_WRITE_OUT_OF_RANGE -diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c -index e9de6da0ce51f..9dcfe9de55b0a 100644 ---- a/arch/mips/loongson32/ls1c/board.c -+++ b/arch/mips/loongson32/ls1c/board.c -@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = { - static int __init ls1c_platform_init(void) - { - ls1x_serial_set_uartclk(&ls1x_uart_pdev); -- ls1x_rtc_set_extclk(&ls1x_rtc_pdev); - - return platform_add_devices(ls1c_platform_devices, - ARRAY_SIZE(ls1c_platform_devices)); -diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h -index 56ffd260c669b..0ec9cfc5131fc 100644 ---- a/arch/parisc/include/asm/bitops.h -+++ b/arch/parisc/include/asm/bitops.h -@@ -12,14 +12,6 @@ - #include - #include - --/* compiler build environment sanity checks: */ --#if !defined(CONFIG_64BIT) && defined(__LP64__) --#error "Please use 'ARCH=parisc' to build the 32-bit kernel." --#endif --#if defined(CONFIG_64BIT) && !defined(__LP64__) --#error "Please use 'ARCH=parisc64' to build the 64-bit kernel." --#endif -- - /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion - * on use of volatile and __*_bit() (set/clear/change): - * *_bit() want use of volatile. 
-diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S -index e0a9e96576221..fd15fd4bbb61b 100644 ---- a/arch/parisc/kernel/head.S -+++ b/arch/parisc/kernel/head.S -@@ -22,7 +22,7 @@ - #include - #include - -- .level PA_ASM_LEVEL -+ .level 1.1 - - __INITDATA - ENTRY(boot_args) -@@ -70,6 +70,47 @@ $bss_loop: - stw,ma %arg2,4(%r1) - stw,ma %arg3,4(%r1) - -+#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20) -+ /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU -+ * and halt kernel if we detect a PA1.x CPU. */ -+ ldi 32,%r10 -+ mtctl %r10,%cr11 -+ .level 2.0 -+ mfctl,w %cr11,%r10 -+ .level 1.1 -+ comib,<>,n 0,%r10,$cpu_ok -+ -+ load32 PA(msg1),%arg0 -+ ldi msg1_end-msg1,%arg1 -+$iodc_panic: -+ copy %arg0, %r10 -+ copy %arg1, %r11 -+ load32 PA(init_stack),%sp -+#define MEM_CONS 0x3A0 -+ ldw MEM_CONS+32(%r0),%arg0 // HPA -+ ldi ENTRY_IO_COUT,%arg1 -+ ldw MEM_CONS+36(%r0),%arg2 // SPA -+ ldw MEM_CONS+8(%r0),%arg3 // layers -+ load32 PA(__bss_start),%r1 -+ stw %r1,-52(%sp) // arg4 -+ stw %r0,-56(%sp) // arg5 -+ stw %r10,-60(%sp) // arg6 = ptr to text -+ stw %r11,-64(%sp) // arg7 = len -+ stw %r0,-68(%sp) // arg8 -+ load32 PA(.iodc_panic_ret), %rp -+ ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC -+ bv,n (%r1) -+.iodc_panic_ret: -+ b . /* wait endless with ... */ -+ or %r10,%r10,%r10 /* qemu idle sleep */ -+msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n" -+msg1_end: -+ -+$cpu_ok: -+#endif -+ -+ .level PA_ASM_LEVEL -+ - /* Initialize startup VM. Just map first 16/32 MB of memory */ - load32 PA(swapper_pg_dir),%r4 - mtctl %r4,%cr24 /* Initialize kernel root pointer */ -diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi -index 9f5bce1488d93..9bf37ef379509 100644 ---- a/arch/riscv/boot/dts/microchip/mpfs.dtsi -+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi -@@ -161,7 +161,7 @@ - ranges; - - cctrllr: cache-controller@2010000 { -- compatible = "sifive,fu540-c000-ccache", "cache"; -+ compatible = "microchip,mpfs-ccache", "sifive,fu540-c000-ccache", "cache"; - reg = <0x0 0x2010000 0x0 0x1000>; - cache-block-size = <64>; - cache-level = <2>; -diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c -index 53ed3884fe644..5d66e3947070c 100644 ---- a/arch/s390/kernel/nmi.c -+++ b/arch/s390/kernel/nmi.c -@@ -63,7 +63,7 @@ static inline unsigned long nmi_get_mcesa_size(void) - * structure. The structure is required for machine check happening - * early in the boot process. 
- */ --static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE); -+static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE); - - void __init nmi_alloc_mcesa_early(u64 *mcesad) - { -diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c -index 0a37f5de28631..3e0361db963ef 100644 ---- a/arch/s390/kernel/setup.c -+++ b/arch/s390/kernel/setup.c -@@ -486,6 +486,7 @@ static void __init setup_lowcore_dat_off(void) - put_abs_lowcore(restart_data, lc->restart_data); - put_abs_lowcore(restart_source, lc->restart_source); - put_abs_lowcore(restart_psw, lc->restart_psw); -+ put_abs_lowcore(mcesad, lc->mcesad); - - lc->spinlock_lockval = arch_spin_lockval(0); - lc->spinlock_index = 0; -diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h -index 4a23e52fe0ee1..ebc271bb6d8ed 100644 ---- a/arch/x86/include/asm/sev.h -+++ b/arch/x86/include/asm/sev.h -@@ -195,7 +195,7 @@ void snp_set_memory_shared(unsigned long vaddr, unsigned int npages); - void snp_set_memory_private(unsigned long vaddr, unsigned int npages); - void snp_set_wakeup_secondary_cpu(void); - bool snp_init(struct boot_params *bp); --void snp_abort(void); -+void __init __noreturn snp_abort(void); - int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err); - #else - static inline void sev_es_ist_enter(struct pt_regs *regs) { } -diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c -index 4f84c3f11af5b..a428c62330d37 100644 ---- a/arch/x86/kernel/sev.c -+++ b/arch/x86/kernel/sev.c -@@ -2112,7 +2112,7 @@ bool __init snp_init(struct boot_params *bp) - return true; - } - --void __init snp_abort(void) -+void __init __noreturn snp_abort(void) - { - sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); - } -diff --git a/block/partitions/core.c b/block/partitions/core.c -index 8a0ec929023bc..76617b1d2d47f 100644 ---- a/block/partitions/core.c -+++ b/block/partitions/core.c -@@ -597,6 +597,9 @@ static int blk_add_partitions(struct gendisk *disk) - if (disk->flags & GENHD_FL_NO_PART) - return 0; - -+ if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) -+ return 0; -+ - state = check_partition(disk); - if (!state) - return 0; -diff --git a/drivers/base/driver.c b/drivers/base/driver.c -index 15a75afe6b845..676b6275d5b53 100644 ---- a/drivers/base/driver.c -+++ b/drivers/base/driver.c -@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override, - if (len >= (PAGE_SIZE - 1)) - return -EINVAL; - -+ /* -+ * Compute the real length of the string in case userspace sends us a -+ * bunch of \0 characters like python likes to do. 
-+ */ -+ len = strlen(s); -+ - if (!len) { - /* Empty string passed - clear override */ - device_lock(dev); -diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c -index 719323bc6c7f1..37ab23a9d0345 100644 ---- a/drivers/base/regmap/regmap-spi.c -+++ b/drivers/base/regmap/regmap-spi.c -@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi, - const struct regmap_config *config) - { - size_t max_size = spi_max_transfer_size(spi); -+ size_t max_msg_size, reg_reserve_size; - struct regmap_bus *bus; - - if (max_size != SIZE_MAX) { -@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi, - if (!bus) - return ERR_PTR(-ENOMEM); - -+ max_msg_size = spi_max_message_size(spi); -+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE -+ + config->pad_bits / BITS_PER_BYTE; -+ if (max_size + reg_reserve_size > max_msg_size) -+ max_size -= reg_reserve_size; -+ - bus->free_on_exit = true; - bus->max_raw_read = max_size; - bus->max_raw_write = max_size; -+ - return bus; - } - -diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c -index 2cad427741647..f9fd1b6c15d42 100644 ---- a/drivers/cpufreq/cpufreq.c -+++ b/drivers/cpufreq/cpufreq.c -@@ -532,7 +532,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy, - - target_freq = clamp_val(target_freq, policy->min, policy->max); - -- if (!cpufreq_driver->target_index) -+ if (!policy->freq_table) - return target_freq; - - idx = cpufreq_frequency_table_target(policy, target_freq, relation); -diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c -index 4dde8edd53b62..3e8d4b51a8140 100644 ---- a/drivers/firmware/efi/capsule-loader.c -+++ b/drivers/firmware/efi/capsule-loader.c -@@ -242,29 +242,6 @@ failed: - return ret; - } - --/** -- * efi_capsule_flush - called by file close or file flush -- * @file: file pointer -- * @id: not used -- * -- * If a capsule is being partially uploaded then calling this function -- * will be treated as upload termination and will free those completed -- * buffer pages and -ECANCELED will be returned. 
-- **/ --static int efi_capsule_flush(struct file *file, fl_owner_t id) --{ -- int ret = 0; -- struct capsule_info *cap_info = file->private_data; -- -- if (cap_info->index > 0) { -- pr_err("capsule upload not complete\n"); -- efi_free_all_buff_pages(cap_info); -- ret = -ECANCELED; -- } -- -- return ret; --} -- - /** - * efi_capsule_release - called by file close - * @inode: not used -@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file) - { - struct capsule_info *cap_info = file->private_data; - -+ if (cap_info->index > 0 && -+ (cap_info->header.headersize == 0 || -+ cap_info->count < cap_info->total_size)) { -+ pr_err("capsule upload not complete\n"); -+ efi_free_all_buff_pages(cap_info); -+ } -+ - kfree(cap_info->pages); - kfree(cap_info->phys); - kfree(file->private_data); -@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = { - .owner = THIS_MODULE, - .open = efi_capsule_open, - .write = efi_capsule_write, -- .flush = efi_capsule_flush, - .release = efi_capsule_release, - .llseek = no_llseek, - }; -diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile -index d0537573501e9..2c67f71f23753 100644 ---- a/drivers/firmware/efi/libstub/Makefile -+++ b/drivers/firmware/efi/libstub/Makefile -@@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ - $(call cc-option,-fno-addrsig) \ - -D__DISABLE_EXPORTS - -+# -+# struct randomization only makes sense for Linux internal types, which the EFI -+# stub code never touches, so let's turn off struct randomization for the stub -+# altogether -+# -+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS)) -+ - # remove SCS flags from all objects in this directory - KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS)) - # disable LTO -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index 3adebb63680e0..67d4a3c13ed19 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -@@ -2482,12 +2482,14 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) - if (!hive->reset_domain || - !amdgpu_reset_get_reset_domain(hive->reset_domain)) { - r = -ENOENT; -+ amdgpu_put_xgmi_hive(hive); - goto init_failed; - } - - /* Drop the early temporary reset domain we created for device */ - amdgpu_reset_put_reset_domain(adev->reset_domain); - adev->reset_domain = hive->reset_domain; -+ amdgpu_put_xgmi_hive(hive); - } - } - -@@ -4473,8 +4475,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, - retry: - amdgpu_amdkfd_pre_reset(adev); - -- amdgpu_amdkfd_pre_reset(adev); -- - if (from_hypervisor) - r = amdgpu_virt_request_full_gpu(adev, true); - else -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -index e9411c28d88ba..2b00f8fe15a89 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -@@ -2612,6 +2612,9 @@ static int psp_hw_fini(void *handle) - psp_rap_terminate(psp); - psp_dtm_terminate(psp); - psp_hdcp_terminate(psp); -+ -+ if (adev->gmc.xgmi.num_physical_nodes > 1) -+ psp_xgmi_terminate(psp); - } - - psp_asd_terminate(psp); -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c -index 1b108d03e7859..f2aebbf3fbe38 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c -@@ -742,7 +742,7 @@ int 
amdgpu_xgmi_remove_device(struct amdgpu_device *adev) - amdgpu_put_xgmi_hive(hive); - } - -- return psp_xgmi_terminate(&adev->psp); -+ return 0; - } - - static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) -diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c -index a4a6751b1e449..30998ac47707c 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c -@@ -5090,9 +5090,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade - data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); - WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); - -- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); -- data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); -- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); -+ /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ -+ if (adev->sdma.num_instances > 1) { -+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); -+ data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1); -+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); -+ } - } else { - /* Program RLC_CGCG_CGLS_CTRL */ - def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL); -@@ -5121,9 +5124,12 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade - data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; - WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data); - -- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); -- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; -- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); -+ /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */ -+ if (adev->sdma.num_instances > 1) { -+ data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL); -+ data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK; -+ WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data); -+ } - } - } - -diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c -index 5349ca4d19e38..6d8ff3b099422 100644 ---- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c -@@ -2587,7 +2587,8 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) - - gfx_v9_0_tiling_mode_table_init(adev); - -- gfx_v9_0_setup_rb(adev); -+ if (adev->gfx.num_gfx_rings) -+ gfx_v9_0_setup_rb(adev); - gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); - adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); - -diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c -index 3f44a099c52a4..3e51e773f92be 100644 ---- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c -+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c -@@ -176,6 +176,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); - WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp); - -+ tmp = mmVM_L2_CNTL3_DEFAULT; - if (adev->gmc.translate_further) { - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, -diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c -index c7a592d68febf..275bfb8ca6f89 100644 ---- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c -+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c -@@ -3188,7 +3188,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc) - &crc_win_y_end_fops); - 
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, - &crc_win_update_fops); -- -+ dput(dir); - } - #endif - /* -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c -index 30c6f9cd717f3..27fbe906682f9 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c -@@ -41,6 +41,12 @@ - #define FN(reg_name, field) \ - FD(reg_name##__##field) - -+#include "logger_types.h" -+#undef DC_LOGGER -+#define DC_LOGGER \ -+ CTX->logger -+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } -+ - #define VBIOSSMC_MSG_TestMessage 0x1 - #define VBIOSSMC_MSG_GetSmuVersion 0x2 - #define VBIOSSMC_MSG_PowerUpGfx 0x3 -@@ -95,7 +101,13 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, - uint32_t result; - - result = rn_smu_wait_for_response(clk_mgr, 10, 200000); -- ASSERT(result == VBIOSSMC_Result_OK); -+ -+ if (result != VBIOSSMC_Result_OK) -+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result); -+ -+ if (result == VBIOSSMC_Status_BUSY) { -+ return -1; -+ } - - /* First clear response register */ - REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY); -@@ -176,6 +188,10 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque - VBIOSSMC_MSG_SetHardMinDcfclkByFreq, - khz_to_mhz_ceil(requested_dcfclk_khz)); - -+#ifdef DBG -+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -+#endif -+ - return actual_dcfclk_set_mhz * 1000; - } - -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c -index 1cae01a91a69d..e4f96b6fd79d0 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c -@@ -41,6 +41,12 @@ - #define FN(reg_name, field) \ - FD(reg_name##__##field) - -+#include "logger_types.h" -+#undef DC_LOGGER -+#define DC_LOGGER \ -+ CTX->logger -+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } -+ - #define VBIOSSMC_MSG_GetSmuVersion 0x2 - #define VBIOSSMC_MSG_SetDispclkFreq 0x4 - #define VBIOSSMC_MSG_SetDprefclkFreq 0x5 -@@ -96,6 +102,13 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, - - result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000); - -+ if (result != VBIOSSMC_Result_OK) -+ smu_print("SMU Response was not OK. 
SMU response after wait received is: %d\n", result); -+ -+ if (result == VBIOSSMC_Status_BUSY) { -+ return -1; -+ } -+ - /* First clear response register */ - REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY); - -@@ -167,6 +180,10 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request - VBIOSSMC_MSG_SetHardMinDcfclkByFreq, - khz_to_mhz_ceil(requested_dcfclk_khz)); - -+#ifdef DBG -+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -+#endif -+ - return actual_dcfclk_set_mhz * 1000; - } - -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c -index c5d7d075026f3..090b2c02aee17 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c -@@ -40,6 +40,12 @@ - #define FN(reg_name, field) \ - FD(reg_name##__##field) - -+#include "logger_types.h" -+#undef DC_LOGGER -+#define DC_LOGGER \ -+ CTX->logger -+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } -+ - #define VBIOSSMC_MSG_TestMessage 0x1 - #define VBIOSSMC_MSG_GetSmuVersion 0x2 - #define VBIOSSMC_MSG_PowerUpGfx 0x3 -@@ -102,7 +108,9 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, - uint32_t result; - - result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); -- ASSERT(result == VBIOSSMC_Result_OK); -+ -+ if (result != VBIOSSMC_Result_OK) -+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result); - - if (result == VBIOSSMC_Status_BUSY) { - return -1; -@@ -194,6 +202,10 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste - VBIOSSMC_MSG_SetHardMinDcfclkByFreq, - khz_to_mhz_ceil(requested_dcfclk_khz)); - -+#ifdef DBG -+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -+#endif -+ - return actual_dcfclk_set_mhz * 1000; - } - -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c -index 2600313fea579..925d6e13620ec 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c -@@ -70,6 +70,12 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D - #define REG_NBIO(reg_name) \ - (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) - -+#include "logger_types.h" -+#undef DC_LOGGER -+#define DC_LOGGER \ -+ CTX->logger -+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } -+ - #define mmMP1_C2PMSG_3 0x3B1050C - - #define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team -@@ -130,7 +136,9 @@ static int dcn315_smu_send_msg_with_param( - uint32_t result; - - result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); -- ASSERT(result == VBIOSSMC_Result_OK); -+ -+ if (result != VBIOSSMC_Result_OK) -+ smu_print("SMU Response was not OK. 
SMU response after wait received is: %d\n", result); - - if (result == VBIOSSMC_Status_BUSY) { - return -1; -@@ -197,6 +205,10 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request - VBIOSSMC_MSG_SetHardMinDcfclkByFreq, - khz_to_mhz_ceil(requested_dcfclk_khz)); - -+#ifdef DBG -+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -+#endif -+ - return actual_dcfclk_set_mhz * 1000; - } - -diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c -index dceec4b960527..457a9254ae1c8 100644 ---- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c -+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c -@@ -58,6 +58,12 @@ static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0x00DC0000, 0x00E0000 - #define FN(reg_name, field) \ - FD(reg_name##__##field) - -+#include "logger_types.h" -+#undef DC_LOGGER -+#define DC_LOGGER \ -+ CTX->logger -+#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } -+ - #define VBIOSSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team - #define VBIOSSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version - #define VBIOSSMC_MSG_Spare0 0x03 ///< Spare0 -@@ -118,7 +124,9 @@ static int dcn316_smu_send_msg_with_param( - uint32_t result; - - result = dcn316_smu_wait_for_response(clk_mgr, 10, 200000); -- ASSERT(result == VBIOSSMC_Result_OK); -+ -+ if (result != VBIOSSMC_Result_OK) -+ smu_print("SMU Response was not OK. SMU response after wait received is: %d\n", result); - - if (result == VBIOSSMC_Status_BUSY) { - return -1; -@@ -183,6 +191,10 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request - VBIOSSMC_MSG_SetHardMinDcfclkByFreq, - khz_to_mhz_ceil(requested_dcfclk_khz)); - -+#ifdef DBG -+ smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000); -+#endif -+ - return actual_dcfclk_set_mhz * 1000; - } - -diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c -index 86d670c712867..ad068865ba206 100644 ---- a/drivers/gpu/drm/drm_gem.c -+++ b/drivers/gpu/drm/drm_gem.c -@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev, - } - EXPORT_SYMBOL(drm_gem_private_object_init); - --static void --drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) --{ -- /* -- * Note: obj->dma_buf can't disappear as long as we still hold a -- * handle reference in obj->handle_count. -- */ -- mutex_lock(&filp->prime.lock); -- if (obj->dma_buf) { -- drm_prime_remove_buf_handle_locked(&filp->prime, -- obj->dma_buf); -- } -- mutex_unlock(&filp->prime.lock); --} -- - /** - * drm_gem_object_handle_free - release resources bound to userspace handles - * @obj: GEM object to clean up. 
-@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) - if (obj->funcs->close) - obj->funcs->close(obj, file_priv); - -- drm_gem_remove_prime_handles(obj, file_priv); -+ drm_prime_remove_buf_handle(&file_priv->prime, id); - drm_vma_node_revoke(&obj->vma_node, file_priv); - - drm_gem_object_handle_put_unlocked(obj); -diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h -index 1fbbc19f1ac09..7bb98e6a446d0 100644 ---- a/drivers/gpu/drm/drm_internal.h -+++ b/drivers/gpu/drm/drm_internal.h -@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, - - void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); - void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); --void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, -- struct dma_buf *dma_buf); -+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, -+ uint32_t handle); - - /* drm_drv.c */ - struct drm_minor *drm_minor_acquire(unsigned int minor_id); -diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c -index e3f09f18110c7..bd5366b16381b 100644 ---- a/drivers/gpu/drm/drm_prime.c -+++ b/drivers/gpu/drm/drm_prime.c -@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri - return -ENOENT; - } - --void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, -- struct dma_buf *dma_buf) -+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, -+ uint32_t handle) - { - struct rb_node *rb; - -- rb = prime_fpriv->dmabufs.rb_node; -+ mutex_lock(&prime_fpriv->lock); -+ -+ rb = prime_fpriv->handles.rb_node; - while (rb) { - struct drm_prime_member *member; - -- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); -- if (member->dma_buf == dma_buf) { -+ member = rb_entry(rb, struct drm_prime_member, handle_rb); -+ if (member->handle == handle) { - rb_erase(&member->handle_rb, &prime_fpriv->handles); - rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); - -- dma_buf_put(dma_buf); -+ dma_buf_put(member->dma_buf); - kfree(member); -- return; -- } else if (member->dma_buf < dma_buf) { -+ break; -+ } else if (member->handle < handle) { - rb = rb->rb_right; - } else { - rb = rb->rb_left; - } - } -+ -+ mutex_unlock(&prime_fpriv->lock); - } - - void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) -diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c -index 0c5638f5b72bc..91caf4523b34d 100644 ---- a/drivers/gpu/drm/i915/display/intel_bios.c -+++ b/drivers/gpu/drm/i915/display/intel_bios.c -@@ -478,6 +478,13 @@ init_bdb_block(struct drm_i915_private *i915, - - block_size = get_blocksize(block); - -+ /* -+ * Version number and new block size are considered -+ * part of the header for MIPI sequenece block v3+. 
-+ */ -+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3) -+ block_size += 5; -+ - entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3), - GFP_KERNEL); - if (!entry) { -diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c -index 9feaf1a589f38..d213d8ad1ea53 100644 ---- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c -+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c -@@ -671,6 +671,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, - intel_dp_compute_rate(intel_dp, crtc_state->port_clock, - &link_bw, &rate_select); - -+ /* -+ * WaEdpLinkRateDataReload -+ * -+ * Parade PS8461E MUX (used on varius TGL+ laptops) needs -+ * to snoop the link rates reported by the sink when we -+ * use LINK_RATE_SET in order to operate in jitter cleaning -+ * mode (as opposed to redriver mode). Unfortunately it -+ * loses track of the snooped link rates when powered down, -+ * so we need to make it re-snoop often. Without this high -+ * link rates are not stable. -+ */ -+ if (!link_bw) { -+ struct intel_connector *connector = intel_dp->attached_connector; -+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; -+ -+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n", -+ connector->base.base.id, connector->base.name); -+ -+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, -+ sink_rates, sizeof(sink_rates)); -+ } -+ - if (link_bw) - drm_dbg_kms(&i915->drm, - "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n", -diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c -index 40e2e28ee6c75..bf01780e7ea56 100644 ---- a/drivers/gpu/drm/i915/gt/intel_llc.c -+++ b/drivers/gpu/drm/i915/gt/intel_llc.c -@@ -12,6 +12,7 @@ - #include "intel_llc.h" - #include "intel_mchbar_regs.h" - #include "intel_pcode.h" -+#include "intel_rps.h" - - struct ia_constants { - unsigned int min_gpu_freq; -@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc, - if (!HAS_LLC(i915) || IS_DGFX(i915)) - return false; - -- if (rps->max_freq <= rps->min_freq) -- return false; -- - consts->max_ia_freq = cpu_max_MHz(); - - consts->min_ring_freq = -@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc, - /* convert DDR frequency from units of 266.6MHz to bandwidth */ - consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3); - -- consts->min_gpu_freq = rps->min_freq; -- consts->max_gpu_freq = rps->max_freq; -- if (GRAPHICS_VER(i915) >= 9) { -- /* Convert GT frequency to 50 HZ units */ -- consts->min_gpu_freq /= GEN9_FREQ_SCALER; -- consts->max_gpu_freq /= GEN9_FREQ_SCALER; -- } -+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps); -+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps); - - return true; - } -@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc) - if (!get_ia_constants(llc, &consts)) - return; - -+ /* -+ * Although this is unlikely on any platform during initialization, -+ * let's ensure we don't get accidentally into infinite loop -+ */ -+ if (consts.max_gpu_freq <= consts.min_gpu_freq) -+ return; - /* - * For each potential GPU frequency, load a ring frequency we'd like - * to use for memory access. 
We do this by specifying the IA frequency -diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c -index 3476a11f294ce..7c068cc64c2fa 100644 ---- a/drivers/gpu/drm/i915/gt/intel_rps.c -+++ b/drivers/gpu/drm/i915/gt/intel_rps.c -@@ -2123,6 +2123,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps) - return intel_gpu_freq(rps, rps->max_freq_softlimit); - } - -+/** -+ * intel_rps_get_max_raw_freq - returns the max frequency in some raw format. -+ * @rps: the intel_rps structure -+ * -+ * Returns the max frequency in a raw format. In newer platforms raw is in -+ * units of 50 MHz. -+ */ -+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps) -+{ -+ struct intel_guc_slpc *slpc = rps_to_slpc(rps); -+ u32 freq; -+ -+ if (rps_uses_slpc(rps)) { -+ return DIV_ROUND_CLOSEST(slpc->rp0_freq, -+ GT_FREQUENCY_MULTIPLIER); -+ } else { -+ freq = rps->max_freq; -+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { -+ /* Convert GT frequency to 50 MHz units */ -+ freq /= GEN9_FREQ_SCALER; -+ } -+ return freq; -+ } -+} -+ - u32 intel_rps_get_rp0_frequency(struct intel_rps *rps) - { - struct intel_guc_slpc *slpc = rps_to_slpc(rps); -@@ -2211,6 +2236,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps) - return intel_gpu_freq(rps, rps->min_freq_softlimit); - } - -+/** -+ * intel_rps_get_min_raw_freq - returns the min frequency in some raw format. -+ * @rps: the intel_rps structure -+ * -+ * Returns the min frequency in a raw format. In newer platforms raw is in -+ * units of 50 MHz. -+ */ -+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps) -+{ -+ struct intel_guc_slpc *slpc = rps_to_slpc(rps); -+ u32 freq; -+ -+ if (rps_uses_slpc(rps)) { -+ return DIV_ROUND_CLOSEST(slpc->min_freq, -+ GT_FREQUENCY_MULTIPLIER); -+ } else { -+ freq = rps->min_freq; -+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { -+ /* Convert GT frequency to 50 MHz units */ -+ freq /= GEN9_FREQ_SCALER; -+ } -+ return freq; -+ } -+} -+ - static int set_min_freq(struct intel_rps *rps, u32 val) - { - int ret = 0; -diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h -index 1e8d564913083..4509dfdc52e09 100644 ---- a/drivers/gpu/drm/i915/gt/intel_rps.h -+++ b/drivers/gpu/drm/i915/gt/intel_rps.h -@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1); - u32 intel_rps_read_actual_frequency(struct intel_rps *rps); - u32 intel_rps_get_requested_frequency(struct intel_rps *rps); - u32 intel_rps_get_min_frequency(struct intel_rps *rps); -+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps); - int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val); - u32 intel_rps_get_max_frequency(struct intel_rps *rps); -+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps); - int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val); - u32 intel_rps_get_rp0_frequency(struct intel_rps *rps); - u32 intel_rps_get_rp1_frequency(struct intel_rps *rps); -diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c -index 429644d5ddc69..9fba16cb3f1e7 100644 ---- a/drivers/gpu/drm/radeon/radeon_device.c -+++ b/drivers/gpu/drm/radeon/radeon_device.c -@@ -1604,6 +1604,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, - if (r) { - /* delay GPU reset to resume */ - radeon_fence_driver_force_completion(rdev, i); -+ } else { -+ /* finish executing delayed work */ -+ flush_delayed_work(&rdev->fence_drv[i].lockup_work); - } - } - -diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c -index 
3633ab691662b..81e688975c6a7 100644 ---- a/drivers/hwmon/asus-ec-sensors.c -+++ b/drivers/hwmon/asus-ec-sensors.c -@@ -54,6 +54,10 @@ static char *mutex_path_override; - /* ACPI mutex for locking access to the EC for the firmware */ - #define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX" - -+#define ASUS_HW_ACCESS_MUTEX_RMTW_ASMX "\\RMTW.ASMX" -+ -+#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0 "\\_SB_.PCI0.SBRG.SIO1.MUT0" -+ - #define MAX_IDENTICAL_BOARD_VARIATIONS 3 - - /* Moniker for the ACPI global lock (':' is not allowed in ASL identifiers) */ -@@ -119,6 +123,18 @@ enum ec_sensors { - ec_sensor_temp_water_in, - /* "Water_Out" temperature sensor reading [℃] */ - ec_sensor_temp_water_out, -+ /* "Water_Block_In" temperature sensor reading [℃] */ -+ ec_sensor_temp_water_block_in, -+ /* "Water_Block_Out" temperature sensor reading [℃] */ -+ ec_sensor_temp_water_block_out, -+ /* "T_sensor_2" temperature sensor reading [℃] */ -+ ec_sensor_temp_t_sensor_2, -+ /* "Extra_1" temperature sensor reading [℃] */ -+ ec_sensor_temp_sensor_extra_1, -+ /* "Extra_2" temperature sensor reading [℃] */ -+ ec_sensor_temp_sensor_extra_2, -+ /* "Extra_3" temperature sensor reading [℃] */ -+ ec_sensor_temp_sensor_extra_3, - }; - - #define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset) -@@ -134,11 +150,19 @@ enum ec_sensors { - #define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu) - #define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in) - #define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out) -+#define SENSOR_TEMP_WATER_BLOCK_IN BIT(ec_sensor_temp_water_block_in) -+#define SENSOR_TEMP_WATER_BLOCK_OUT BIT(ec_sensor_temp_water_block_out) -+#define SENSOR_TEMP_T_SENSOR_2 BIT(ec_sensor_temp_t_sensor_2) -+#define SENSOR_TEMP_SENSOR_EXTRA_1 BIT(ec_sensor_temp_sensor_extra_1) -+#define SENSOR_TEMP_SENSOR_EXTRA_2 BIT(ec_sensor_temp_sensor_extra_2) -+#define SENSOR_TEMP_SENSOR_EXTRA_3 BIT(ec_sensor_temp_sensor_extra_3) - - enum board_family { - family_unknown, - family_amd_400_series, - family_amd_500_series, -+ family_intel_300_series, -+ family_intel_600_series - }; - - /* All the known sensors for ASUS EC controllers */ -@@ -195,15 +219,54 @@ static const struct ec_sensor_info sensors_family_amd_500[] = { - EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00), - [ec_sensor_temp_water_out] = - EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01), -+ [ec_sensor_temp_water_block_in] = -+ EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02), -+ [ec_sensor_temp_water_block_out] = -+ EC_SENSOR("Water_Block_Out", hwmon_temp, 1, 0x01, 0x03), -+ [ec_sensor_temp_sensor_extra_1] = -+ EC_SENSOR("Extra_1", hwmon_temp, 1, 0x01, 0x09), -+ [ec_sensor_temp_t_sensor_2] = -+ EC_SENSOR("T_sensor_2", hwmon_temp, 1, 0x01, 0x0a), -+ [ec_sensor_temp_sensor_extra_2] = -+ EC_SENSOR("Extra_2", hwmon_temp, 1, 0x01, 0x0b), -+ [ec_sensor_temp_sensor_extra_3] = -+ EC_SENSOR("Extra_3", hwmon_temp, 1, 0x01, 0x0c), -+}; -+ -+static const struct ec_sensor_info sensors_family_intel_300[] = { -+ [ec_sensor_temp_chipset] = -+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a), -+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b), -+ [ec_sensor_temp_mb] = -+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c), -+ [ec_sensor_temp_t_sensor] = -+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d), -+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e), -+ [ec_sensor_fan_cpu_opt] = -+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0), -+ [ec_sensor_fan_vrm_hs] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2), -+ [ec_sensor_fan_water_flow] 
= -+ EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc), -+ [ec_sensor_temp_water_in] = -+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00), -+ [ec_sensor_temp_water_out] = -+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01), -+}; -+ -+static const struct ec_sensor_info sensors_family_intel_600[] = { -+ [ec_sensor_temp_t_sensor] = -+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d), -+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e), - }; - - /* Shortcuts for common combinations */ - #define SENSOR_SET_TEMP_CHIPSET_CPU_MB \ - (SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB) - #define SENSOR_SET_TEMP_WATER (SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT) -+#define SENSOR_SET_WATER_BLOCK \ -+ (SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT) - - struct ec_board_info { -- const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS]; - unsigned long sensors; - /* - * Defines which mutex to use for guarding access to the state and the -@@ -216,121 +279,194 @@ struct ec_board_info { - enum board_family family; - }; - --static const struct ec_board_info board_info[] = { -- { -- .board_names = {"PRIME X470-PRO"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -- SENSOR_FAN_CPU_OPT | -- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH, -- .family = family_amd_400_series, -- }, -- { -- .board_names = {"PRIME X570-PRO"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM | -- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ProArt X570-CREATOR WIFI"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM | -- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT | -- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -- }, -- { -- .board_names = {"Pro WS X570-ACE"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM | -- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET | -- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG CROSSHAIR VIII DARK HERO"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | -- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | -- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW | -- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = { -- "ROG CROSSHAIR VIII FORMULA", -- "ROG CROSSHAIR VIII HERO", -- "ROG CROSSHAIR VIII HERO (WI-FI)", -- }, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | -- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | -- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | -- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | -- SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG CROSSHAIR VIII IMPACT"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | -- SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG STRIX B550-E GAMING"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -- SENSOR_FAN_CPU_OPT, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG STRIX B550-I GAMING"}, -- .sensors = 
SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU | -- SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG STRIX X570-E GAMING"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | -- SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG STRIX X570-E GAMING WIFI II"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU | -- SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG STRIX X570-F GAMING"}, -- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- { -- .board_names = {"ROG STRIX X570-I GAMING"}, -- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_FAN_VRM_HS | -- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | -- SENSOR_IN_CPU_CORE, -- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -- .family = family_amd_500_series, -- }, -- {} -+static const struct ec_board_info board_info_prime_x470_pro = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -+ SENSOR_FAN_CPU_OPT | -+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH, -+ .family = family_amd_400_series, -+}; -+ -+static const struct ec_board_info board_info_prime_x570_pro = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM | -+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM | -+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT | -+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_pro_ws_x570_ace = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM | -+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET | -+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_crosshair_viii_dark_hero = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | -+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | -+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW | -+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_crosshair_viii_hero = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | -+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | -+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | -+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | -+ SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_maximus_xi_hero = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | -+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | -+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_intel_300_series, -+}; -+ -+static const struct 
ec_board_info board_info_crosshair_viii_impact = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | -+ SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_b550_e_gaming = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -+ SENSOR_FAN_CPU_OPT, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_b550_i_gaming = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU | -+ SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_x570_e_gaming = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM | -+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU | -+ SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU | -+ SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_x570_f_gaming = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | -+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_x570_i_gaming = { -+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM | -+ SENSOR_TEMP_T_SENSOR | -+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET | -+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX, -+ .family = family_amd_500_series, -+}; -+ -+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = { -+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX, -+ .family = family_intel_600_series, -+}; -+ -+static const struct ec_board_info board_info_zenith_ii_extreme = { -+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR | -+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER | -+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS | -+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE | -+ SENSOR_SET_WATER_BLOCK | -+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 | -+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3, -+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0, -+ .family = family_amd_500_series, -+}; -+ -+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \ -+ { \ -+ .matches = { \ -+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, \ -+ "ASUSTeK COMPUTER INC."), \ -+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \ -+ }, \ -+ .driver_data = (void *)board_info, \ -+ } -+ -+static const struct dmi_system_id dmi_table[] = { -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO", -+ &board_info_prime_x470_pro), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO", -+ &board_info_prime_x570_pro), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI", -+ &board_info_pro_art_x570_creator_wifi), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE", -+ &board_info_pro_ws_x570_ace), -+ 
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO", -+ &board_info_crosshair_viii_dark_hero), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA", -+ &board_info_crosshair_viii_hero), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO", -+ &board_info_crosshair_viii_hero), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)", -+ &board_info_crosshair_viii_hero), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO", -+ &board_info_maximus_xi_hero), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)", -+ &board_info_maximus_xi_hero), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT", -+ &board_info_crosshair_viii_impact), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING", -+ &board_info_strix_b550_e_gaming), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING", -+ &board_info_strix_b550_i_gaming), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING", -+ &board_info_strix_x570_e_gaming), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II", -+ &board_info_strix_x570_e_gaming_wifi_ii), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING", -+ &board_info_strix_x570_f_gaming), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING", -+ &board_info_strix_x570_i_gaming), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4", -+ &board_info_strix_z690_a_gaming_wifi_d4), -+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME", -+ &board_info_zenith_ii_extreme), -+ {}, - }; - - struct ec_sensor { -@@ -441,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec, - return -ENOENT; - } - --static int __init bank_compare(const void *a, const void *b) -+static int bank_compare(const void *a, const void *b) - { - return *((const s8 *)a) - *((const s8 *)b); - } - --static void __init setup_sensor_data(struct ec_sensors_data *ec) -+static void setup_sensor_data(struct ec_sensors_data *ec) - { - struct ec_sensor *s = ec->sensors; - bool bank_found; -@@ -478,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec) - sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL); - } - --static void __init fill_ec_registers(struct ec_sensors_data *ec) -+static void fill_ec_registers(struct ec_sensors_data *ec) - { - const struct ec_sensor_info *si; - unsigned int i, j, register_idx = 0; -@@ -493,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec) - } - } - --static int __init setup_lock_data(struct device *dev) -+static int setup_lock_data(struct device *dev) - { - const char *mutex_path; - int status; -@@ -716,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata, - return find_ec_sensor_index(state, type, channel) >= 0 ? 
S_IRUGO : 0; - } - --static int __init -+static int - asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan, - struct device *dev, int num, - enum hwmon_sensor_types type, u32 config) -@@ -745,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = { - .ops = &asus_ec_hwmon_ops, - }; - --static const struct ec_board_info * __init get_board_info(void) -+static const struct ec_board_info *get_board_info(void) - { -- const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); -- const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME); -- const struct ec_board_info *board; -- -- if (!dmi_board_vendor || !dmi_board_name || -- strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC.")) -- return NULL; -- -- for (board = board_info; board->sensors; board++) { -- if (match_string(board->board_names, -- MAX_IDENTICAL_BOARD_VARIATIONS, -- dmi_board_name) >= 0) -- return board; -- } -+ const struct dmi_system_id *dmi_entry; - -- return NULL; -+ dmi_entry = dmi_first_match(dmi_table); -+ return dmi_entry ? dmi_entry->driver_data : NULL; - } - --static int __init asus_ec_probe(struct platform_device *pdev) -+static int asus_ec_probe(struct platform_device *pdev) - { - const struct hwmon_channel_info **ptr_asus_ec_ci; - int nr_count[hwmon_max] = { 0 }, nr_types = 0; -@@ -799,6 +923,12 @@ static int __init asus_ec_probe(struct platform_device *pdev) - case family_amd_500_series: - ec_data->sensors_info = sensors_family_amd_500; - break; -+ case family_intel_300_series: -+ ec_data->sensors_info = sensors_family_intel_300; -+ break; -+ case family_intel_600_series: -+ ec_data->sensors_info = sensors_family_intel_600; -+ break; - default: - dev_err(dev, "Unknown board family: %d", - ec_data->board_info->family); -@@ -868,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev) - return PTR_ERR_OR_ZERO(hwdev); - } - -- --static const struct acpi_device_id acpi_ec_ids[] = { -- /* Embedded Controller Device */ -- { "PNP0C09", 0 }, -- {} --}; -+MODULE_DEVICE_TABLE(dmi, dmi_table); - - static struct platform_driver asus_ec_sensors_platform_driver = { - .driver = { - .name = "asus-ec-sensors", -- .acpi_match_table = acpi_ec_ids, - }, -+ .probe = asus_ec_probe, - }; - --MODULE_DEVICE_TABLE(acpi, acpi_ec_ids); --/* -- * we use module_platform_driver_probe() rather than module_platform_driver() -- * because the probe function (and its dependants) are marked with __init, which -- * means we can't put it into the .probe member of the platform_driver struct -- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init -- * because the object is referenced from the module exit code. 
-- */ --module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe); -+static struct platform_device *asus_ec_sensors_platform_device; -+ -+static int __init asus_ec_init(void) -+{ -+ asus_ec_sensors_platform_device = -+ platform_create_bundle(&asus_ec_sensors_platform_driver, -+ asus_ec_probe, NULL, 0, NULL, 0); -+ -+ if (IS_ERR(asus_ec_sensors_platform_device)) -+ return PTR_ERR(asus_ec_sensors_platform_device); -+ -+ return 0; -+} -+ -+static void __exit asus_ec_exit(void) -+{ -+ platform_device_unregister(asus_ec_sensors_platform_device); -+ platform_driver_unregister(&asus_ec_sensors_platform_driver); -+} -+ -+module_init(asus_ec_init); -+module_exit(asus_ec_exit); - - module_param_named(mutex_path, mutex_path_override, charp, 0); - MODULE_PARM_DESC(mutex_path, -diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c -index 26278b0f17a98..9259779cc2dff 100644 ---- a/drivers/hwmon/mr75203.c -+++ b/drivers/hwmon/mr75203.c -@@ -68,8 +68,9 @@ - - /* VM Individual Macro Register */ - #define VM_COM_REG_SIZE 0x200 --#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n)) --#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n)) -+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm)) -+#define VM_SDIF_DATA(vm, ch) \ -+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch)) - - /* SDA Slave Register */ - #define IP_CTRL 0x00 -@@ -115,6 +116,7 @@ struct pvt_device { - u32 t_num; - u32 p_num; - u32 v_num; -+ u32 c_num; - u32 ip_freq; - u8 *vm_idx; - }; -@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val) - { - struct pvt_device *pvt = dev_get_drvdata(dev); - struct regmap *v_map = pvt->v_map; -+ u8 vm_idx, ch_idx; - u32 n, stat; -- u8 vm_idx; - int ret; - -- if (channel >= pvt->v_num) -+ if (channel >= pvt->v_num * pvt->c_num) - return -EINVAL; - -- vm_idx = pvt->vm_idx[channel]; -+ vm_idx = pvt->vm_idx[channel / pvt->c_num]; -+ ch_idx = channel % pvt->c_num; - - switch (attr) { - case hwmon_in_input: -@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val) - if (ret) - return ret; - -- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n); -+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n); - if(ret < 0) - return ret; - - n &= SAMPLE_DATA_MSK; -- /* Convert the N bitstream count into voltage */ -- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS; -+ /* -+ * Convert the N bitstream count into voltage. -+ * To support negative voltage calculation for 64bit machines -+ * n must be cast to long, since n and *val differ both in -+ * signedness and in size. -+ * Division is used instead of right shift, because for signed -+ * numbers, the sign bit is used to fill the vacated bit -+ * positions, and if the number is negative, 1 is used. -+ * BIT(x) may not be used instead of (1 << x) because it's -+ * unsigned. 
-+ */ -+ *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS); - - return 0; - default: -@@ -375,6 +388,19 @@ static int pvt_init(struct pvt_device *pvt) - if (ret) - return ret; - -+ val = (BIT(pvt->c_num) - 1) | VM_CH_INIT | -+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG; -+ ret = regmap_write(v_map, SDIF_W, val); -+ if (ret < 0) -+ return ret; -+ -+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT, -+ val, !(val & SDIF_BUSY), -+ PVT_POLL_DELAY_US, -+ PVT_POLL_TIMEOUT_US); -+ if (ret) -+ return ret; -+ - val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT | - CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT | - SDIF_WRN_W | SDIF_PROG; -@@ -489,8 +515,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt - - static int mr75203_probe(struct platform_device *pdev) - { -+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i; - const struct hwmon_channel_info **pvt_info; -- u32 ts_num, vm_num, pd_num, val, index, i; - struct device *dev = &pdev->dev; - u32 *temp_config, *in_config; - struct device *hwmon_dev; -@@ -531,9 +557,11 @@ static int mr75203_probe(struct platform_device *pdev) - ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT; - pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT; - vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT; -+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT; - pvt->t_num = ts_num; - pvt->p_num = pd_num; - pvt->v_num = vm_num; -+ pvt->c_num = ch_num; - val = 0; - if (ts_num) - val++; -@@ -570,7 +598,7 @@ static int mr75203_probe(struct platform_device *pdev) - } - - if (vm_num) { -- u32 num = vm_num; -+ u32 total_ch; - - ret = pvt_get_regmap(pdev, "vm", pvt); - if (ret) -@@ -584,30 +612,30 @@ static int mr75203_probe(struct platform_device *pdev) - ret = device_property_read_u8_array(dev, "intel,vm-map", - pvt->vm_idx, vm_num); - if (ret) { -- num = 0; -+ /* -+ * Incase intel,vm-map property is not defined, we -+ * assume incremental channel numbers. -+ */ -+ for (i = 0; i < vm_num; i++) -+ pvt->vm_idx[i] = i; - } else { - for (i = 0; i < vm_num; i++) - if (pvt->vm_idx[i] >= vm_num || - pvt->vm_idx[i] == 0xff) { -- num = i; -+ pvt->v_num = i; -+ vm_num = i; - break; - } - } - -- /* -- * Incase intel,vm-map property is not defined, we assume -- * incremental channel numbers. 
-- */ -- for (i = num; i < vm_num; i++) -- pvt->vm_idx[i] = i; -- -- in_config = devm_kcalloc(dev, num + 1, -+ total_ch = ch_num * vm_num; -+ in_config = devm_kcalloc(dev, total_ch + 1, - sizeof(*in_config), GFP_KERNEL); - if (!in_config) - return -ENOMEM; - -- memset32(in_config, HWMON_I_INPUT, num); -- in_config[num] = 0; -+ memset32(in_config, HWMON_I_INPUT, total_ch); -+ in_config[total_ch] = 0; - pvt_in.config = in_config; - - pvt_info[index++] = &pvt_in; -diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c -index 8bd6435c13e82..2148fd543bb4b 100644 ---- a/drivers/hwmon/tps23861.c -+++ b/drivers/hwmon/tps23861.c -@@ -489,18 +489,20 @@ static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port) - - static int tps23861_port_resistance(struct tps23861_data *data, int port) - { -- u16 regval; -+ unsigned int raw_val; -+ __le16 regval; - - regmap_bulk_read(data->regmap, - PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1), - ®val, - 2); - -- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) { -+ raw_val = le16_to_cpu(regval); -+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) { - case PORT_RESISTANCE_RSN_OTHER: -- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000; -+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000; - case PORT_RESISTANCE_RSN_LOW: -- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000; -+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000; - case PORT_RESISTANCE_RSN_SHORT: - case PORT_RESISTANCE_RSN_OPEN: - default: -diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c -index fabca5e51e3d4..4dd133eccfdfb 100644 ---- a/drivers/infiniband/core/cma.c -+++ b/drivers/infiniband/core/cma.c -@@ -1719,8 +1719,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, - } - - if (!validate_net_dev(*net_dev, -- (struct sockaddr *)&req->listen_addr_storage, -- (struct sockaddr *)&req->src_addr_storage)) { -+ (struct sockaddr *)&req->src_addr_storage, -+ (struct sockaddr *)&req->listen_addr_storage)) { - id_priv = ERR_PTR(-EHOSTUNREACH); - goto err; - } -diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c -index 186ed8859920c..d39e16c211e8a 100644 ---- a/drivers/infiniband/core/umem_odp.c -+++ b/drivers/infiniband/core/umem_odp.c -@@ -462,7 +462,7 @@ retry: - mutex_unlock(&umem_odp->umem_mutex); - - out_put_mm: -- mmput(owning_mm); -+ mmput_async(owning_mm); - out_put_task: - if (owning_process) - put_task_struct(owning_process); -diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h -index 2855e9ad4b328..1df076e70e293 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_device.h -+++ b/drivers/infiniband/hw/hns/hns_roce_device.h -@@ -730,7 +730,6 @@ struct hns_roce_caps { - u32 num_qps; - u32 num_pi_qps; - u32 reserved_qps; -- int num_qpc_timer; - u32 num_srqs; - u32 max_wqes; - u32 max_srq_wrs; -diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c -index b354caeaa9b29..49edff989f1f1 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c -+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c -@@ -1941,7 +1941,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) - - caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; - caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; -- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; -+ caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM; - 
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM; - - caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; -@@ -2237,7 +2237,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) - caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); - caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); - caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); -- caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); - caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); - caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); - caps->num_aeq_vectors = resp_a->num_aeq_vectors; -diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h -index 7ffb7824d2689..e4b640caee1b7 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h -+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h -@@ -36,11 +36,11 @@ - #include - - #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 --#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 - #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 - #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 - #define HNS_ROCE_V2_MAX_SRQ_SGE 64 - #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 -+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100 - #define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100 - #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 - #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -@@ -83,7 +83,7 @@ - - #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE - #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE --#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 -+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000 - #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 - #define HNS_ROCE_INVALID_LKEY 0x0 - #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 -diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c -index c8af4ebd7cbd3..4ccb217b2841d 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_main.c -+++ b/drivers/infiniband/hw/hns/hns_roce_main.c -@@ -725,7 +725,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) - ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table, - HEM_TYPE_QPC_TIMER, - hr_dev->caps.qpc_timer_entry_sz, -- hr_dev->caps.num_qpc_timer, 1); -+ hr_dev->caps.qpc_timer_bt_num, 1); - if (ret) { - dev_err(dev, - "Failed to init QPC timer memory, aborting.\n"); -diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c -index 48d3616a6d71d..7bee7f6c5e702 100644 ---- a/drivers/infiniband/hw/hns/hns_roce_qp.c -+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c -@@ -462,11 +462,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, - hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + - hr_qp->rq.rsv_sge); - -- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) -- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); -- else -- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * -- hr_qp->rq.max_gs); -+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * -+ hr_qp->rq.max_gs); - - hr_qp->rq.wqe_cnt = cnt; - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE && -diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c -index daeab5daed5bc..d003ad864ee44 100644 ---- a/drivers/infiniband/hw/irdma/uk.c -+++ b/drivers/infiniband/hw/irdma/uk.c -@@ -1005,6 +1005,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, - int ret_code; - bool move_cq_head = true; - u8 polarity; -+ u8 op_type; - bool ext_valid; - __le64 *ext_cqe; - -@@ -1187,7 +1188,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, - do { - __le64 *sw_wqe; 
- u64 wqe_qword; -- u8 op_type; - u32 tail; - - tail = qp->sq_ring.tail; -@@ -1204,6 +1204,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, - break; - } - } while (1); -+ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR) -+ info->minor_err = FLUSH_MW_BIND_ERR; - qp->sq_flush_seen = true; - if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) - qp->sq_flush_complete = true; -diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c -index ab3c5208a1231..f4d774451160d 100644 ---- a/drivers/infiniband/hw/irdma/utils.c -+++ b/drivers/infiniband/hw/irdma/utils.c -@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf, - cqp_error = cqp_request->compl_info.error; - if (cqp_error) { - err_code = -EIO; -- if (cqp_request->compl_info.maj_err_code == 0xFFFF && -- cqp_request->compl_info.min_err_code == 0x8029) { -- if (!rf->reset) { -- rf->reset = true; -- rf->gen_ops.request_reset(rf); -+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) { -+ if (cqp_request->compl_info.min_err_code == 0x8002) -+ err_code = -EBUSY; -+ else if (cqp_request->compl_info.min_err_code == 0x8029) { -+ if (!rf->reset) { -+ rf->reset = true; -+ rf->gen_ops.request_reset(rf); -+ } - } - } - } -@@ -2597,7 +2600,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp) - spin_unlock_irqrestore(&iwqp->lock, flags2); - spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); - if (compl_generated) -- irdma_comp_handler(iwqp->iwrcq); -+ irdma_comp_handler(iwqp->iwscq); - } else { - spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); - mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, -diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c -index 227a799385d1d..ab73d1715f991 100644 ---- a/drivers/infiniband/hw/irdma/verbs.c -+++ b/drivers/infiniband/hw/irdma/verbs.c -@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev, - props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags; - props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags; - props->max_cq = rf->max_cq - rf->used_cqs; -- props->max_cqe = rf->max_cqe; -+ props->max_cqe = rf->max_cqe - 1; - props->max_mr = rf->max_mr - rf->used_mrs; - props->max_mw = props->max_mr; - props->max_pd = rf->max_pd - rf->used_pds; - props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; - props->max_qp_rd_atom = hw_attrs->max_hw_ird; - props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; -- if (rdma_protocol_roce(ibdev, 1)) -+ if (rdma_protocol_roce(ibdev, 1)) { -+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN; - props->max_pkeys = IRDMA_PKEY_TBL_SZ; -+ } -+ - props->max_ah = rf->max_ah; - props->max_mcast_grp = rf->max_mcg; - props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; -@@ -3001,6 +3004,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) - struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; - struct irdma_cqp_request *cqp_request; - struct cqp_cmds_info *cqp_info; -+ int status; - - if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { - if (iwmr->region) { -@@ -3031,8 +3035,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) - cqp_info->post_sq = 1; - cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; - cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; -- irdma_handle_cqp_op(iwdev->rf, cqp_request); -+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request); - irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); -+ if (status) -+ return status; -+ - irdma_free_stag(iwdev, iwmr->stag); - 
done: - if (iwpbl->pbl_allocated) -diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c -index 293ed709e5ed5..b4dc52392275b 100644 ---- a/drivers/infiniband/hw/mlx5/mad.c -+++ b/drivers/infiniband/hw/mlx5/mad.c -@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num, - mdev = dev->mdev; - mdev_port_num = 1; - } -+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) { -+ /* set local port to one for Function-Per-Port HCA. */ -+ mdev = dev->mdev; -+ mdev_port_num = 1; -+ } -+ - /* Declaring support of extended counters */ - if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { - struct ib_class_port_info cpi = {}; -diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c -index 1f4e60257700e..7d47b521070b1 100644 ---- a/drivers/infiniband/sw/siw/siw_qp_tx.c -+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c -@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) - dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); - - if (paddr) -- return virt_to_page(paddr); -+ return virt_to_page((void *)paddr); - - return NULL; - } -@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) - kunmap_local(kaddr); - } - } else { -- u64 va = sge->laddr + sge_off; -+ /* -+ * Cast to an uintptr_t to preserve all 64 bits -+ * in sge->laddr. -+ */ -+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off); - -- page_array[seg] = virt_to_page(va & PAGE_MASK); -+ /* -+ * virt_to_page() takes a (void *) pointer -+ * so cast to a (void *) meaning it will be 64 -+ * bits on a 64 bit platform and 32 bits on a -+ * 32 bit platform. -+ */ -+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK)); - if (do_crc) - crypto_shash_update( - c_tx->mpa_crc_hd, -- (void *)(uintptr_t)va, -+ (void *)va, - plen); - } - -diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c -index 525f083fcaeb4..bf464400a4409 100644 ---- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c -+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c -@@ -1004,7 +1004,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path, - static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, - struct rtrs_clt_io_req *req, - struct rtrs_rbuf *rbuf, bool fr_en, -- u32 size, u32 imm, struct ib_send_wr *wr, -+ u32 count, u32 size, u32 imm, -+ struct ib_send_wr *wr, - struct ib_send_wr *tail) - { - struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); -@@ -1024,12 +1025,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, - num_sge = 2; - ptail = tail; - } else { -- for_each_sg(req->sglist, sg, req->sg_cnt, i) { -+ for_each_sg(req->sglist, sg, count, i) { - sge[i].addr = sg_dma_address(sg); - sge[i].length = sg_dma_len(sg); - sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; - } -- num_sge = 1 + req->sg_cnt; -+ num_sge = 1 + count; - } - sge[i].addr = req->iu->dma_addr; - sge[i].length = size; -@@ -1142,7 +1143,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) - */ - rtrs_clt_update_all_stats(req, WRITE); - -- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, -+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count, - req->usr_len + sizeof(*msg), - imm, wr, &inv_wr); - if (ret) { -diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c -index 24024bce25664..ee4876bdce4ac 100644 ---- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c -+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c -@@ -600,7 +600,7 @@ static 
int map_cont_bufs(struct rtrs_srv_path *srv_path) - struct sg_table *sgt = &srv_mr->sgt; - struct scatterlist *s; - struct ib_mr *mr; -- int nr, chunks; -+ int nr, nr_sgt, chunks; - - chunks = chunks_per_mr * mri; - if (!always_invalidate) -@@ -615,19 +615,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) - sg_set_page(s, srv->chunks[chunks + i], - max_chunk_size, 0); - -- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, -+ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, - sgt->nents, DMA_BIDIRECTIONAL); -- if (nr < sgt->nents) { -- err = nr < 0 ? nr : -EINVAL; -+ if (!nr_sgt) { -+ err = -EINVAL; - goto free_sg; - } - mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, -- sgt->nents); -+ nr_sgt); - if (IS_ERR(mr)) { - err = PTR_ERR(mr); - goto unmap_sg; - } -- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents, -+ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt, - NULL, max_chunk_size); - if (nr < 0 || nr < sgt->nents) { - err = nr < 0 ? nr : -EINVAL; -@@ -646,7 +646,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path) - } - } - /* Eventually dma addr for each chunk can be cached */ -- for_each_sg(sgt->sgl, s, sgt->orig_nents, i) -+ for_each_sg(sgt->sgl, s, nr_sgt, i) - srv_path->dma_addr[chunks + i] = sg_dma_address(s); - - ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); -diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c -index 6058abf42ba74..3d9c108d73ad8 100644 ---- a/drivers/infiniband/ulp/srp/ib_srp.c -+++ b/drivers/infiniband/ulp/srp/ib_srp.c -@@ -1962,7 +1962,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) - if (scmnd) { - req = scsi_cmd_priv(scmnd); - scmnd = srp_claim_req(ch, req, NULL, scmnd); -- } else { -+ } -+ if (!scmnd) { - shost_printk(KERN_ERR, target->scsi_host, - "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", - rsp->tag, ch - target->ch, ch->qp->qp_num); -diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c -index 840831d5d2ad9..a0924144bac80 100644 ---- a/drivers/iommu/amd/iommu.c -+++ b/drivers/iommu/amd/iommu.c -@@ -874,7 +874,8 @@ static void build_completion_wait(struct iommu_cmd *cmd, - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK; - cmd->data[1] = upper_32_bits(paddr); -- cmd->data[2] = data; -+ cmd->data[2] = lower_32_bits(data); -+ cmd->data[3] = upper_32_bits(data); - CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); - } - -diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c -index afb3efd565b78..f3e2689787ae5 100644 ---- a/drivers/iommu/amd/iommu_v2.c -+++ b/drivers/iommu/amd/iommu_v2.c -@@ -786,6 +786,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) - if (dev_state->domain == NULL) - goto out_free_states; - -+ /* See iommu_is_default_domain() */ -+ dev_state->domain->type = IOMMU_DOMAIN_IDENTITY; - amd_iommu_domain_direct_map(dev_state->domain); - - ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids); -diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c -index 64b14ac4c7b02..fc8c1420c0b69 100644 ---- a/drivers/iommu/intel/dmar.c -+++ b/drivers/iommu/intel/dmar.c -@@ -2368,6 +2368,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert) - if (!dmar_in_use()) - return 0; - -+ /* -+ * It's unlikely that any I/O board is hot added before the IOMMU -+ * subsystem is initialized. 
-+ */ -+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled) -+ return -EOPNOTSUPP; -+ - if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { - tmp = handle; - } else { -diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c -index 5c0dce78586aa..40ac3a78d90ef 100644 ---- a/drivers/iommu/intel/iommu.c -+++ b/drivers/iommu/intel/iommu.c -@@ -422,14 +422,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain, - return !(addr_width < BITS_PER_LONG && pfn >> addr_width); - } - -+/* -+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU. -+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of -+ * the returned SAGAW. -+ */ -+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu) -+{ -+ unsigned long fl_sagaw, sl_sagaw; -+ -+ fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0); -+ sl_sagaw = cap_sagaw(iommu->cap); -+ -+ /* Second level only. */ -+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) -+ return sl_sagaw; -+ -+ /* First level only. */ -+ if (!ecap_slts(iommu->ecap)) -+ return fl_sagaw; -+ -+ return fl_sagaw & sl_sagaw; -+} -+ - static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) - { - unsigned long sagaw; - int agaw; - -- sagaw = cap_sagaw(iommu->cap); -- for (agaw = width_to_agaw(max_gaw); -- agaw >= 0; agaw--) { -+ sagaw = __iommu_calculate_sagaw(iommu); -+ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { - if (test_bit(agaw, &sagaw)) - break; - } -@@ -3123,13 +3145,7 @@ static int __init init_dmars(void) - - #ifdef CONFIG_INTEL_IOMMU_SVM - if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) { -- /* -- * Call dmar_alloc_hwirq() with dmar_global_lock held, -- * could cause possible lock race condition. -- */ -- up_write(&dmar_global_lock); - ret = intel_svm_enable_prq(iommu); -- down_write(&dmar_global_lock); - if (ret) - goto free_iommu; - } -@@ -4035,7 +4051,6 @@ int __init intel_iommu_init(void) - force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) || - platform_optin_force_iommu(); - -- down_write(&dmar_global_lock); - if (dmar_table_init()) { - if (force_on) - panic("tboot: Failed to initialize DMAR table\n"); -@@ -4048,16 +4063,6 @@ int __init intel_iommu_init(void) - goto out_free_dmar; - } - -- up_write(&dmar_global_lock); -- -- /* -- * The bus notifier takes the dmar_global_lock, so lockdep will -- * complain later when we register it under the lock. 
-- */ -- dmar_register_bus_notifier(); -- -- down_write(&dmar_global_lock); -- - if (!no_iommu) - intel_iommu_debugfs_init(); - -@@ -4105,11 +4110,9 @@ int __init intel_iommu_init(void) - pr_err("Initialization failed\n"); - goto out_free_dmar; - } -- up_write(&dmar_global_lock); - - init_iommu_pm_ops(); - -- down_read(&dmar_global_lock); - for_each_active_iommu(iommu, drhd) { - /* - * The flush queue implementation does not perform -@@ -4127,13 +4130,11 @@ int __init intel_iommu_init(void) - "%s", iommu->name); - iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); - } -- up_read(&dmar_global_lock); - - bus_set_iommu(&pci_bus_type, &intel_iommu_ops); - if (si_domain && !hw_pass_through) - register_memory_notifier(&intel_iommu_memory_nb); - -- down_read(&dmar_global_lock); - if (probe_acpi_namespace_devices()) - pr_warn("ACPI name space devices didn't probe correctly\n"); - -@@ -4144,17 +4145,15 @@ int __init intel_iommu_init(void) - - iommu_disable_protect_mem_regions(iommu); - } -- up_read(&dmar_global_lock); -- -- pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); - - intel_iommu_enabled = 1; -+ dmar_register_bus_notifier(); -+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); - - return 0; - - out_free_dmar: - intel_iommu_free_dmars(); -- up_write(&dmar_global_lock); - return ret; - } - -diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c -index 847ad47a2dfd3..f113833c3075c 100644 ---- a/drivers/iommu/iommu.c -+++ b/drivers/iommu/iommu.c -@@ -3089,6 +3089,24 @@ out: - return ret; - } - -+static bool iommu_is_default_domain(struct iommu_group *group) -+{ -+ if (group->domain == group->default_domain) -+ return true; -+ -+ /* -+ * If the default domain was set to identity and it is still an identity -+ * domain then we consider this a pass. This happens because of -+ * amd_iommu_init_device() replacing the default idenytity domain with an -+ * identity domain that has a different configuration for AMDGPU. -+ */ -+ if (group->default_domain && -+ group->default_domain->type == IOMMU_DOMAIN_IDENTITY && -+ group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY) -+ return true; -+ return false; -+} -+ - /** - * iommu_device_use_default_domain() - Device driver wants to handle device - * DMA through the kernel DMA API. -@@ -3107,8 +3125,7 @@ int iommu_device_use_default_domain(struct device *dev) - - mutex_lock(&group->mutex); - if (group->owner_cnt) { -- if (group->domain != group->default_domain || -- group->owner) { -+ if (group->owner || !iommu_is_default_domain(group)) { - ret = -EBUSY; - goto unlock_out; - } -diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c -index 25be4b822aa07..bf340d779c10b 100644 ---- a/drivers/iommu/virtio-iommu.c -+++ b/drivers/iommu/virtio-iommu.c -@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args) - return iommu_fwspec_add_ids(dev, args->args, 1); - } - -+static bool viommu_capable(enum iommu_cap cap) -+{ -+ switch (cap) { -+ case IOMMU_CAP_CACHE_COHERENCY: -+ return true; -+ default: -+ return false; -+ } -+} -+ - static struct iommu_ops viommu_ops = { -+ .capable = viommu_capable, - .domain_alloc = viommu_domain_alloc, - .probe_device = viommu_probe_device, - .probe_finalize = viommu_probe_finalize, -diff --git a/drivers/md/md.c b/drivers/md/md.c -index 91e7e80fce489..25d18b67a1620 100644 ---- a/drivers/md/md.c -+++ b/drivers/md/md.c -@@ -5647,6 +5647,7 @@ static int md_alloc(dev_t dev, char *name) - * removed (mddev_delayed_delete). 
- */ - flush_workqueue(md_misc_wq); -+ flush_workqueue(md_rdev_misc_wq); - - mutex_lock(&disks_mutex); - mddev = mddev_alloc(dev); -diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c -index 6ba4c83fe5fc0..bff0bfd10e235 100644 ---- a/drivers/net/bonding/bond_main.c -+++ b/drivers/net/bonding/bond_main.c -@@ -1974,6 +1974,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, - for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) - new_slave->target_last_arp_rx[i] = new_slave->last_rx; - -+ new_slave->last_tx = new_slave->last_rx; -+ - if (bond->params.miimon && !bond->params.use_carrier) { - link_reporting = bond_check_dev_link(bond, slave_dev, 1); - -@@ -2857,8 +2859,11 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip, - return; - } - -- if (bond_handle_vlan(slave, tags, skb)) -+ if (bond_handle_vlan(slave, tags, skb)) { -+ slave_update_last_tx(slave); - arp_xmit(skb); -+ } -+ - return; - } - -@@ -3047,8 +3052,7 @@ static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, - curr_active_slave->last_link_up)) - bond_validate_arp(bond, slave, tip, sip); - else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) && -- bond_time_in_interval(bond, -- dev_trans_start(curr_arp_slave->dev), 1)) -+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) - bond_validate_arp(bond, slave, sip, tip); - - out_unlock: -@@ -3076,8 +3080,10 @@ static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr, - } - - addrconf_addr_solict_mult(daddr, &mcaddr); -- if (bond_handle_vlan(slave, tags, skb)) -+ if (bond_handle_vlan(slave, tags, skb)) { -+ slave_update_last_tx(slave); - ndisc_send_skb(skb, &mcaddr, saddr); -+ } - } - - static void bond_ns_send_all(struct bonding *bond, struct slave *slave) -@@ -3134,6 +3140,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave) - found: - if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr)) - bond_ns_send(slave, &targets[i], &saddr, tags); -+ else -+ bond_ns_send(slave, &targets[i], &in6addr_any, tags); -+ - dst_release(dst); - kfree(tags); - } -@@ -3165,12 +3174,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr) - return ret; - } - --static void bond_validate_ns(struct bonding *bond, struct slave *slave, -+static void bond_validate_na(struct bonding *bond, struct slave *slave, - struct in6_addr *saddr, struct in6_addr *daddr) - { - int i; - -- if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) { -+ /* Ignore NAs that: -+ * 1. Source address is unspecified address. -+ * 2. Dest address is neither all-nodes multicast address nor -+ * exist on bond interface. -+ */ -+ if (ipv6_addr_any(saddr) || -+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) && -+ !bond_has_this_ip6(bond, daddr))) { - slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n", - __func__, saddr, daddr); - return; -@@ -3213,15 +3229,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, - * see bond_arp_rcv(). 
- */ - if (bond_is_active_slave(slave)) -- bond_validate_ns(bond, slave, saddr, daddr); -+ bond_validate_na(bond, slave, saddr, daddr); - else if (curr_active_slave && - time_after(slave_last_rx(bond, curr_active_slave), - curr_active_slave->last_link_up)) -- bond_validate_ns(bond, slave, saddr, daddr); -+ bond_validate_na(bond, slave, saddr, daddr); - else if (curr_arp_slave && -- bond_time_in_interval(bond, -- dev_trans_start(curr_arp_slave->dev), 1)) -- bond_validate_ns(bond, slave, saddr, daddr); -+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) -+ bond_validate_na(bond, slave, saddr, daddr); - - out: - return RX_HANDLER_ANOTHER; -@@ -3308,12 +3323,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) - * so it can wait - */ - bond_for_each_slave_rcu(bond, slave, iter) { -- unsigned long trans_start = dev_trans_start(slave->dev); -+ unsigned long last_tx = slave_last_tx(slave); - - bond_propose_link_state(slave, BOND_LINK_NOCHANGE); - - if (slave->link != BOND_LINK_UP) { -- if (bond_time_in_interval(bond, trans_start, 1) && -+ if (bond_time_in_interval(bond, last_tx, 1) && - bond_time_in_interval(bond, slave->last_rx, 1)) { - - bond_propose_link_state(slave, BOND_LINK_UP); -@@ -3338,7 +3353,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) - * when the source ip is 0, so don't take the link down - * if we don't know our ip yet - */ -- if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) || -+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || - !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { - - bond_propose_link_state(slave, BOND_LINK_DOWN); -@@ -3404,7 +3419,7 @@ re_arm: - */ - static int bond_ab_arp_inspect(struct bonding *bond) - { -- unsigned long trans_start, last_rx; -+ unsigned long last_tx, last_rx; - struct list_head *iter; - struct slave *slave; - int commit = 0; -@@ -3455,9 +3470,9 @@ static int bond_ab_arp_inspect(struct bonding *bond) - * - (more than missed_max*delta since receive AND - * the bond has an IP address) - */ -- trans_start = dev_trans_start(slave->dev); -+ last_tx = slave_last_tx(slave); - if (bond_is_active_slave(slave) && -- (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) || -+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || - !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) { - bond_propose_link_state(slave, BOND_LINK_DOWN); - commit++; -@@ -3474,8 +3489,8 @@ static int bond_ab_arp_inspect(struct bonding *bond) - */ - static void bond_ab_arp_commit(struct bonding *bond) - { -- unsigned long trans_start; - struct list_head *iter; -+ unsigned long last_tx; - struct slave *slave; - - bond_for_each_slave(bond, slave, iter) { -@@ -3484,10 +3499,10 @@ static void bond_ab_arp_commit(struct bonding *bond) - continue; - - case BOND_LINK_UP: -- trans_start = dev_trans_start(slave->dev); -+ last_tx = slave_last_tx(slave); - if (rtnl_dereference(bond->curr_active_slave) != slave || - (!rtnl_dereference(bond->curr_active_slave) && -- bond_time_in_interval(bond, trans_start, 1))) { -+ bond_time_in_interval(bond, last_tx, 1))) { - struct slave *current_arp_slave; - - current_arp_slave = rtnl_dereference(bond->current_arp_slave); -diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c -index 6439b56f381f9..517bc3922ee24 100644 ---- a/drivers/net/dsa/ocelot/felix_vsc9959.c -+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c -@@ -16,11 +16,13 @@ - #include - #include - #include 
-+#include - #include "felix.h" - - #define VSC9959_NUM_PORTS 6 - - #define VSC9959_TAS_GCL_ENTRY_MAX 63 -+#define VSC9959_TAS_MIN_GATE_LEN_NS 33 - #define VSC9959_VCAP_POLICER_BASE 63 - #define VSC9959_VCAP_POLICER_MAX 383 - #define VSC9959_SWITCH_PCI_BAR 4 -@@ -1410,6 +1412,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) - mdiobus_free(felix->imdio); - } - -+/* The switch considers any frame (regardless of size) as eligible for -+ * transmission if the traffic class gate is open for at least 33 ns. -+ * Overruns are prevented by cropping an interval at the end of the gate time -+ * slot for which egress scheduling is blocked, but we need to still keep 33 ns -+ * available for one packet to be transmitted, otherwise the port tc will hang. -+ * This function returns the size of a gate interval that remains available for -+ * setting the guard band, after reserving the space for one egress frame. -+ */ -+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns) -+{ -+ /* Gate always open */ -+ if (gate_len_ns == U64_MAX) -+ return U64_MAX; -+ -+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; -+} -+ - /* Extract shortest continuous gate open intervals in ns for each traffic class - * of a cyclic tc-taprio schedule. If a gate is always open, the duration is - * considered U64_MAX. If the gate is always closed, it is considered 0. -@@ -1471,6 +1490,65 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio, - min_gate_len[tc] = 0; - } - -+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ, -+ * so we need to spell out the register access to each traffic class in helper -+ * functions, to simplify callers -+ */ -+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc, -+ u32 max_sdu) -+{ -+ switch (tc) { -+ case 0: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0, -+ port); -+ break; -+ case 1: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1, -+ port); -+ break; -+ case 2: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2, -+ port); -+ break; -+ case 3: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3, -+ port); -+ break; -+ case 4: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4, -+ port); -+ break; -+ case 5: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5, -+ port); -+ break; -+ case 6: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6, -+ port); -+ break; -+ case 7: -+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7, -+ port); -+ break; -+ } -+} -+ -+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc) -+{ -+ switch (tc) { -+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port); -+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port); -+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port); -+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port); -+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port); -+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port); -+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port); -+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port); -+ default: -+ return 0; -+ } -+} -+ - /* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the - * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU - * values (the default value is 1518). 
Also, for traffic class windows smaller -@@ -1527,11 +1605,16 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) - - vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len); - -+ mutex_lock(&ocelot->fwd_domain_lock); -+ - for (tc = 0; tc < OCELOT_NUM_TC; tc++) { -+ u64 remaining_gate_len_ps; - u32 max_sdu; - -- if (min_gate_len[tc] == U64_MAX /* Gate always open */ || -- min_gate_len[tc] * 1000 > needed_bit_time_ps) { -+ remaining_gate_len_ps = -+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]); -+ -+ if (remaining_gate_len_ps > needed_bit_time_ps) { - /* Setting QMAXSDU_CFG to 0 disables oversized frame - * dropping. - */ -@@ -1544,9 +1627,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) - /* If traffic class doesn't support a full MTU sized - * frame, make sure to enable oversize frame dropping - * for frames larger than the smallest that would fit. -+ * -+ * However, the exact same register, QSYS_QMAXSDU_CFG_*, -+ * controls not only oversized frame dropping, but also -+ * per-tc static guard band lengths, so it reduces the -+ * useful gate interval length. Therefore, be careful -+ * to calculate a guard band (and therefore max_sdu) -+ * that still leaves 33 ns available in the time slot. - */ -- max_sdu = div_u64(min_gate_len[tc] * 1000, -- picos_per_byte); -+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte); - /* A TC gate may be completely closed, which is a - * special case where all packets are oversized. - * Any limit smaller than 64 octets accomplishes this -@@ -1569,47 +1658,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) - max_sdu); - } - -- /* ocelot_write_rix is a macro that concatenates -- * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out -- * the writes to each traffic class -- */ -- switch (tc) { -- case 0: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0, -- port); -- break; -- case 1: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1, -- port); -- break; -- case 2: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2, -- port); -- break; -- case 3: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3, -- port); -- break; -- case 4: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4, -- port); -- break; -- case 5: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5, -- port); -- break; -- case 6: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6, -- port); -- break; -- case 7: -- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7, -- port); -- break; -- } -+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu); - } - - ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port); -+ -+ ocelot->ops->cut_through_fwd(ocelot); -+ -+ mutex_unlock(&ocelot->fwd_domain_lock); - } - - static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, -@@ -1636,13 +1692,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, - break; - } - -+ mutex_lock(&ocelot->tas_lock); -+ - ocelot_rmw_rix(ocelot, - QSYS_TAG_CONFIG_LINK_SPEED(tas_speed), - QSYS_TAG_CONFIG_LINK_SPEED_M, - QSYS_TAG_CONFIG, port); - -- mutex_lock(&ocelot->tas_lock); -- - if (ocelot_port->taprio) - vsc9959_tas_guard_bands_update(ocelot, port); - -@@ -2709,7 +2765,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot) - { - struct felix *felix = ocelot_to_felix(ocelot); - struct dsa_switch *ds = felix->ds; -- int port, other_port; -+ int tc, port, other_port; - - lockdep_assert_held(&ocelot->fwd_domain_lock); - -@@ -2753,19 +2809,27 @@ 
static void vsc9959_cut_through_fwd(struct ocelot *ocelot) - min_speed = other_ocelot_port->speed; - } - -- /* Enable cut-through forwarding for all traffic classes. */ -- if (ocelot_port->speed == min_speed) -+ /* Enable cut-through forwarding for all traffic classes that -+ * don't have oversized dropping enabled, since this check is -+ * bypassed in cut-through mode. -+ */ -+ if (ocelot_port->speed == min_speed) { - val = GENMASK(7, 0); - -+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) -+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc)) -+ val &= ~BIT(tc); -+ } -+ - set: - tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port); - if (tmp == val) - continue; - - dev_dbg(ocelot->dev, -- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n", -+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n", - port, mask, ocelot_port->speed, min_speed, -- val ? "enabling" : "disabling"); -+ val ? "enabling" : "disabling", val); - - ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port); - } -diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h -index 407fe8f340a06..c5b61bc80f783 100644 ---- a/drivers/net/ethernet/intel/i40e/i40e.h -+++ b/drivers/net/ethernet/intel/i40e/i40e.h -@@ -1291,4 +1291,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, - int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, - struct i40e_cloud_filter *filter, - bool add); -+ -+/** -+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF -+ * @pf: pointer to a pf. -+ * -+ * Check and return value of flag I40E_FLAG_TC_MQPRIO. -+ * -+ * Return: I40E_FLAG_TC_MQPRIO set state. -+ **/ -+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf) -+{ -+ return pf->flags & I40E_FLAG_TC_MQPRIO; -+} -+ - #endif /* _I40E_H_ */ -diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c -index ea2bb0140a6eb..10d7a982a5b9b 100644 ---- a/drivers/net/ethernet/intel/i40e/i40e_client.c -+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c -@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) - "Cannot locate client instance close routine\n"); - return; - } -+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { -+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n"); -+ return; -+ } - cdev->client->ops->close(&cdev->lan_info, cdev->client, reset); - clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); - i40e_client_release_qvlist(&cdev->lan_info); -@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf) - /* Remove failed client instance */ - clear_bit(__I40E_CLIENT_INSTANCE_OPENED, - &cdev->state); -- i40e_client_del_instance(pf); - return; - } - } -diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c -index 22a61802a4027..ed9984f1e1b9f 100644 ---- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c -+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c -@@ -4931,7 +4931,7 @@ static int i40e_set_channels(struct net_device *dev, - /* We do not support setting channels via ethtool when TCs are - * configured through mqprio - */ -- if (pf->flags & I40E_FLAG_TC_MQPRIO) -+ if (i40e_is_tc_mqprio_enabled(pf)) - return -EINVAL; - - /* verify they are not requesting separate vectors */ -diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c -index 71a8e1698ed48..1aaf0c5ddf6cf 100644 ---- 
a/drivers/net/ethernet/intel/i40e/i40e_main.c -+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c -@@ -5339,7 +5339,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) - u8 num_tc = 0; - struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; - -- if (pf->flags & I40E_FLAG_TC_MQPRIO) -+ if (i40e_is_tc_mqprio_enabled(pf)) - return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; - - /* If neither MQPRIO nor DCB is enabled, then always use single TC */ -@@ -5371,7 +5371,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) - **/ - static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) - { -- if (pf->flags & I40E_FLAG_TC_MQPRIO) -+ if (i40e_is_tc_mqprio_enabled(pf)) - return i40e_mqprio_get_enabled_tc(pf); - - /* If neither MQPRIO nor DCB is enabled for this PF then just return -@@ -5468,7 +5468,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, - int i; - - /* There is no need to reset BW when mqprio mode is on. */ -- if (pf->flags & I40E_FLAG_TC_MQPRIO) -+ if (i40e_is_tc_mqprio_enabled(pf)) - return 0; - if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { - ret = i40e_set_bw_limit(vsi, vsi->seid, 0); -@@ -5540,7 +5540,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) - vsi->tc_config.tc_info[i].qoffset); - } - -- if (pf->flags & I40E_FLAG_TC_MQPRIO) -+ if (i40e_is_tc_mqprio_enabled(pf)) - return; - - /* Assign UP2TC map for the VSI */ -@@ -5701,7 +5701,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) - ctxt.vf_num = 0; - ctxt.uplink_seid = vsi->uplink_seid; - ctxt.info = vsi->info; -- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { -+ if (i40e_is_tc_mqprio_enabled(pf)) { - ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); - if (ret) - goto out; -@@ -6425,7 +6425,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - - if (vsi->type == I40E_VSI_MAIN) { -- if (pf->flags & I40E_FLAG_TC_MQPRIO) -+ if (i40e_is_tc_mqprio_enabled(pf)) - i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); - else - i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); -@@ -6536,6 +6536,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) - vsi->tc_seid_map[i] = ch->seid; - } - } -+ -+ /* reset to reconfigure TX queue contexts */ -+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); - return ret; - - err_free: -@@ -7819,7 +7822,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) - netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); - return ERR_PTR(-EINVAL); - } -- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { -+ if (i40e_is_tc_mqprio_enabled(pf)) { - netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); - return ERR_PTR(-EINVAL); - } -@@ -8072,7 +8075,7 @@ config_tc: - /* Quiesce VSI queues */ - i40e_quiesce_vsi(vsi); - -- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) -+ if (!hw && !i40e_is_tc_mqprio_enabled(pf)) - i40e_remove_queue_channels(vsi); - - /* Configure VSI for enabled TCs */ -@@ -8096,7 +8099,7 @@ config_tc: - "Setup channel (id:%u) utilizing num_queues %d\n", - vsi->seid, vsi->tc_config.tc_info[0].qcount); - -- if (pf->flags & I40E_FLAG_TC_MQPRIO) { -+ if (i40e_is_tc_mqprio_enabled(pf)) { - if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; - -@@ -10750,7 +10753,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) - * unless I40E_FLAG_TC_MQPRIO was enabled or DCB - * is not supported with new link speed - */ -- if 
(pf->flags & I40E_FLAG_TC_MQPRIO) { -+ if (i40e_is_tc_mqprio_enabled(pf)) { - i40e_aq_set_dcb_parameters(hw, false, NULL); - } else { - if (I40E_IS_X710TL_DEVICE(hw->device_id) && -diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c -index af69ccc6e8d2f..07f1e209d524d 100644 ---- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c -+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c -@@ -3689,7 +3689,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev, - u8 prio; - - /* is DCB enabled at all? */ -- if (vsi->tc_config.numtc == 1) -+ if (vsi->tc_config.numtc == 1 || -+ i40e_is_tc_mqprio_enabled(vsi->back)) - return netdev_pick_tx(netdev, skb, sb_dev); - - prio = skb->priority; -diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c -index 6d159334da9ec..981c43b204ff4 100644 ---- a/drivers/net/ethernet/intel/iavf/iavf_main.c -+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c -@@ -2789,6 +2789,11 @@ static void iavf_reset_task(struct work_struct *work) - int i = 0, err; - bool running; - -+ /* Detach interface to avoid subsequent NDO callbacks */ -+ rtnl_lock(); -+ netif_device_detach(netdev); -+ rtnl_unlock(); -+ - /* When device is being removed it doesn't make sense to run the reset - * task, just return in such a case. - */ -@@ -2796,7 +2801,7 @@ static void iavf_reset_task(struct work_struct *work) - if (adapter->state != __IAVF_REMOVE) - queue_work(iavf_wq, &adapter->reset_task); - -- return; -+ goto reset_finish; - } - - while (!mutex_trylock(&adapter->client_lock)) -@@ -2866,7 +2871,6 @@ continue_reset: - - if (running) { - netif_carrier_off(netdev); -- netif_tx_stop_all_queues(netdev); - adapter->link_up = false; - iavf_napi_disable_all(adapter); - } -@@ -2996,7 +3000,7 @@ continue_reset: - mutex_unlock(&adapter->client_lock); - mutex_unlock(&adapter->crit_lock); - -- return; -+ goto reset_finish; - reset_err: - if (running) { - set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); -@@ -3007,6 +3011,10 @@ reset_err: - mutex_unlock(&adapter->client_lock); - mutex_unlock(&adapter->crit_lock); - dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); -+reset_finish: -+ rtnl_lock(); -+ netif_device_attach(netdev); -+ rtnl_unlock(); - } - - /** -diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c -index 136d7911adb48..1e32438081780 100644 ---- a/drivers/net/ethernet/intel/ice/ice_base.c -+++ b/drivers/net/ethernet/intel/ice/ice_base.c -@@ -7,18 +7,6 @@ - #include "ice_dcb_lib.h" - #include "ice_sriov.h" - --static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring) --{ -- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL); -- return !!rx_ring->xdp_buf; --} -- --static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring) --{ -- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); -- return !!rx_ring->rx_buf; --} -- - /** - * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI - * @qs_cfg: gathered variables needed for PF->VSI queues assignment -@@ -519,11 +507,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) - xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, - ring->q_index, ring->q_vector->napi.napi_id); - -- kfree(ring->rx_buf); - ring->xsk_pool = ice_xsk_pool(ring); - if (ring->xsk_pool) { -- if (!ice_alloc_rx_buf_zc(ring)) -- return -ENOMEM; - xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - - ring->rx_buf_len = -@@ -538,8 +523,6 @@ int ice_vsi_cfg_rxq(struct 
ice_rx_ring *ring) - dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", - ring->q_index); - } else { -- if (!ice_alloc_rx_buf(ring)) -- return -ENOMEM; - if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) - /* coverity[check_return] */ - xdp_rxq_info_reg(&ring->xdp_rxq, -diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c -index 3d45e075204e3..4c6bb7482b362 100644 ---- a/drivers/net/ethernet/intel/ice/ice_main.c -+++ b/drivers/net/ethernet/intel/ice/ice_main.c -@@ -2898,10 +2898,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); - } -+ /* reallocate Rx queues that are used for zero-copy */ -+ xdp_ring_err = ice_realloc_zc_buf(vsi, true); -+ if (xdp_ring_err) -+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); - } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { - xdp_ring_err = ice_destroy_xdp_rings(vsi); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); -+ /* reallocate Rx queues that were used for zero-copy */ -+ xdp_ring_err = ice_realloc_zc_buf(vsi, false); -+ if (xdp_ring_err) -+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); - } else { - /* safe to call even when prog == vsi->xdp_prog as - * dev_xdp_install in net/core/dev.c incremented prog's -@@ -3904,7 +3912,7 @@ static int ice_init_pf(struct ice_pf *pf) - - pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); - if (!pf->avail_rxqs) { -- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); -+ bitmap_free(pf->avail_txqs); - pf->avail_txqs = NULL; - return -ENOMEM; - } -diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c -index e48e29258450f..03ce85f6e6df8 100644 ---- a/drivers/net/ethernet/intel/ice/ice_xsk.c -+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c -@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) - err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); - if (err) - return err; -+ ice_clean_rx_ring(rx_ring); - - ice_qvec_toggle_napi(vsi, q_vector, false); - ice_qp_clean_rings(vsi, q_idx); -@@ -316,6 +317,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) - return 0; - } - -+/** -+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer -+ * @rx_ring: Rx ring -+ * @pool_present: is pool for XSK present -+ * -+ * Try allocating memory and return ENOMEM, if failed to allocate. -+ * If allocation was successful, substitute buffer with allocated one. -+ * Returns 0 on success, negative on failure -+ */ -+static int -+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) -+{ -+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) : -+ sizeof(*rx_ring->rx_buf); -+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); -+ -+ if (!sw_ring) -+ return -ENOMEM; -+ -+ if (pool_present) { -+ kfree(rx_ring->rx_buf); -+ rx_ring->rx_buf = NULL; -+ rx_ring->xdp_buf = sw_ring; -+ } else { -+ kfree(rx_ring->xdp_buf); -+ rx_ring->xdp_buf = NULL; -+ rx_ring->rx_buf = sw_ring; -+ } -+ -+ return 0; -+} -+ -+/** -+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs -+ * @vsi: Current VSI -+ * @zc: is zero copy set -+ * -+ * Reallocate buffer for rx_rings that might be used by XSK. -+ * XDP requires more memory, than rx_buf provides. 
-+ * Returns 0 on success, negative on failure -+ */ -+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) -+{ -+ struct ice_rx_ring *rx_ring; -+ unsigned long q; -+ -+ for_each_set_bit(q, vsi->af_xdp_zc_qps, -+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) { -+ rx_ring = vsi->rx_rings[q]; -+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ - /** - * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state - * @vsi: Current VSI -@@ -345,11 +402,17 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) - if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); - - if (if_running) { -+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; -+ - ret = ice_qp_dis(vsi, qid); - if (ret) { - netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); - goto xsk_pool_if_up; - } -+ -+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present); -+ if (ret) -+ goto xsk_pool_if_up; - } - - pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) : -diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h -index 21faec8e97db1..4edbe81eb6460 100644 ---- a/drivers/net/ethernet/intel/ice/ice_xsk.h -+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h -@@ -27,6 +27,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); - void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); - void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); - bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget); -+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); - #else - static inline bool - ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring, -@@ -72,5 +73,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev, - - static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { } - static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { } -+ -+static inline int -+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi, -+ bool __always_unused zc) -+{ -+ return 0; -+} - #endif /* CONFIG_XDP_SOCKETS */ - #endif /* !_ICE_XSK_H_ */ -diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c -index dab8f3f771f84..cfe804bc8d205 100644 ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -412,7 +412,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - if (entry->hash != 0xffff) { - ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE; - ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, -- MTK_FOE_STATE_BIND); -+ MTK_FOE_STATE_UNBIND); - dma_wmb(); - } - entry->hash = 0xffff; -diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h -index 1f5cf1c9a9475..69ffce04d6306 100644 ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -293,6 +293,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) - if (!ppe) - return; - -+ if (hash > MTK_PPE_HASH_MASK) -+ return; -+ - now = (u16)jiffies; - diff = now - ppe->foe_check_time[hash]; - if (diff < HZ / 10) -diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c -index 73f7962a37d33..c49062ad72c6c 100644 ---- a/drivers/net/phy/meson-gxl.c -+++ b/drivers/net/phy/meson-gxl.c -@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev) - irq_status == INTSRC_ENERGY_DETECT) - return IRQ_HANDLED; - -- /* Give PHY some time before 
MAC starts sending data. This works -- * around an issue where network doesn't come up properly. -- */ -- if (!(irq_status & INTSRC_LINK_DOWN)) -- phy_queue_state_machine(phydev, msecs_to_jiffies(100)); -- else -- phy_trigger_machine(phydev); -+ phy_trigger_machine(phydev); - - return IRQ_HANDLED; - } -diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c -index d4c93d59bc539..8569a545e0a3f 100644 ---- a/drivers/net/phy/microchip_t1.c -+++ b/drivers/net/phy/microchip_t1.c -@@ -28,12 +28,16 @@ - - /* Interrupt Source Register */ - #define LAN87XX_INTERRUPT_SOURCE (0x18) -+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08) - - /* Interrupt Mask Register */ - #define LAN87XX_INTERRUPT_MASK (0x19) - #define LAN87XX_MASK_LINK_UP (0x0004) - #define LAN87XX_MASK_LINK_DOWN (0x0002) - -+#define LAN87XX_INTERRUPT_MASK_2 (0x09) -+#define LAN87XX_MASK_COMM_RDY BIT(10) -+ - /* MISC Control 1 Register */ - #define LAN87XX_CTRL_1 (0x11) - #define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000) -@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev) - int rc, val = 0; - - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { -- /* unmask all source and clear them before enable */ -- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF); -+ /* clear all interrupt */ -+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); -+ if (rc < 0) -+ return rc; -+ - rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); -- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN; -+ if (rc < 0) -+ return rc; -+ -+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, -+ PHYACC_ATTR_BANK_MISC, -+ LAN87XX_INTERRUPT_MASK_2, val); -+ if (rc < 0) -+ return rc; -+ -+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, -+ PHYACC_ATTR_BANK_MISC, -+ LAN87XX_INTERRUPT_SOURCE_2, 0); -+ if (rc < 0) -+ return rc; -+ -+ /* enable link down and comm ready interrupt */ -+ val = LAN87XX_MASK_LINK_DOWN; - rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); -+ if (rc < 0) -+ return rc; -+ -+ val = LAN87XX_MASK_COMM_RDY; -+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, -+ PHYACC_ATTR_BANK_MISC, -+ LAN87XX_INTERRUPT_MASK_2, val); - } else { - rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); -- if (rc) -+ if (rc < 0) - return rc; - - rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); -+ if (rc < 0) -+ return rc; -+ -+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE, -+ PHYACC_ATTR_BANK_MISC, -+ LAN87XX_INTERRUPT_MASK_2, val); -+ if (rc < 0) -+ return rc; -+ -+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, -+ PHYACC_ATTR_BANK_MISC, -+ LAN87XX_INTERRUPT_SOURCE_2, 0); - } - - return rc < 0 ? rc : 0; -@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev) - { - int irq_status; - -+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ, -+ PHYACC_ATTR_BANK_MISC, -+ LAN87XX_INTERRUPT_SOURCE_2, 0); -+ if (irq_status < 0) { -+ phy_error(phydev); -+ return IRQ_NONE; -+ } -+ - irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); - if (irq_status < 0) { - phy_error(phydev); -diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c -index c62f299b9e0a8..d8a5dbf89a021 100644 ---- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c -+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c -@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, - /* Repeat initial/next rate. - * For legacy IL_NUMBER_TRY == 1, this loop will not execute. - * For HT IL_HT_NUMBER_TRY == 3, this executes twice. 
*/ -- while (repeat_rate > 0) { -+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) { - if (is_legacy(tbl_type.lq_type)) { - if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) - ant_toggle_cnt++; -@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, - cpu_to_le32(new_rate); - repeat_rate--; - idx++; -- if (idx >= LINK_QUAL_MAX_RETRY_NUM) -- goto out; - } - - il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, -@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta, - repeat_rate--; - } - --out: - lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; - -diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c -index b0f58bcf70cb0..106c88b723b90 100644 ---- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c -+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c -@@ -345,7 +345,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev) - - err = mt7921e_driver_own(dev); - if (err) -- return err; -+ goto out; - - err = mt7921_run_firmware(dev); - if (err) -diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h -index a067274c20144..bf001e9def6aa 100644 ---- a/drivers/net/wireless/microchip/wilc1000/netdev.h -+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h -@@ -254,6 +254,7 @@ struct wilc { - u8 *rx_buffer; - u32 rx_buffer_offset; - u8 *tx_buffer; -+ u32 *vmm_table; - - struct txq_handle txq[NQUEUES]; - int txq_entries; -diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c -index 7962c11cfe848..56f924a31bc66 100644 ---- a/drivers/net/wireless/microchip/wilc1000/sdio.c -+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c -@@ -27,6 +27,7 @@ struct wilc_sdio { - bool irq_gpio; - u32 block_size; - int has_thrpt_enh3; -+ u8 *cmd53_buf; - }; - - struct sdio_cmd52 { -@@ -46,6 +47,7 @@ struct sdio_cmd53 { - u32 count: 9; - u8 *buffer; - u32 block_size; -+ bool use_global_buf; - }; - - static const struct wilc_hif_func wilc_hif_sdio; -@@ -90,6 +92,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd) - { - struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev); - int size, ret; -+ struct wilc_sdio *sdio_priv = wilc->bus_data; -+ u8 *buf = cmd->buffer; - - sdio_claim_host(func); - -@@ -100,12 +104,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd) - else - size = cmd->count; - -+ if (cmd->use_global_buf) { -+ if (size > sizeof(u32)) -+ return -EINVAL; -+ -+ buf = sdio_priv->cmd53_buf; -+ } -+ - if (cmd->read_write) { /* write */ -- ret = sdio_memcpy_toio(func, cmd->address, -- (void *)cmd->buffer, size); -+ if (cmd->use_global_buf) -+ memcpy(buf, cmd->buffer, size); -+ -+ ret = sdio_memcpy_toio(func, cmd->address, buf, size); - } else { /* read */ -- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer, -- cmd->address, size); -+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size); -+ -+ if (cmd->use_global_buf) -+ memcpy(cmd->buffer, buf, size); - } - - sdio_release_host(func); -@@ -127,6 +142,12 @@ static int wilc_sdio_probe(struct sdio_func *func, - if (!sdio_priv) - return -ENOMEM; - -+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL); -+ if (!sdio_priv->cmd53_buf) { -+ ret = -ENOMEM; -+ goto free; -+ } -+ - ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO, - 
&wilc_hif_sdio); - if (ret) -@@ -160,6 +181,7 @@ dispose_irq: - irq_dispose_mapping(wilc->dev_irq_num); - wilc_netdev_cleanup(wilc); - free: -+ kfree(sdio_priv->cmd53_buf); - kfree(sdio_priv); - return ret; - } -@@ -171,6 +193,7 @@ static void wilc_sdio_remove(struct sdio_func *func) - - clk_disable_unprepare(wilc->rtc_clk); - wilc_netdev_cleanup(wilc); -+ kfree(sdio_priv->cmd53_buf); - kfree(sdio_priv); - } - -@@ -367,8 +390,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) - cmd.address = WILC_SDIO_FBR_DATA_REG; - cmd.block_mode = 0; - cmd.increment = 1; -- cmd.count = 4; -+ cmd.count = sizeof(u32); - cmd.buffer = (u8 *)&data; -+ cmd.use_global_buf = true; - cmd.block_size = sdio_priv->block_size; - ret = wilc_sdio_cmd53(wilc, &cmd); - if (ret) -@@ -406,6 +430,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) - nblk = size / block_size; - nleft = size % block_size; - -+ cmd.use_global_buf = false; - if (nblk > 0) { - cmd.block_mode = 1; - cmd.increment = 1; -@@ -484,8 +509,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) - cmd.address = WILC_SDIO_FBR_DATA_REG; - cmd.block_mode = 0; - cmd.increment = 1; -- cmd.count = 4; -+ cmd.count = sizeof(u32); - cmd.buffer = (u8 *)data; -+ cmd.use_global_buf = true; - - cmd.block_size = sdio_priv->block_size; - ret = wilc_sdio_cmd53(wilc, &cmd); -@@ -527,6 +553,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) - nblk = size / block_size; - nleft = size % block_size; - -+ cmd.use_global_buf = false; - if (nblk > 0) { - cmd.block_mode = 1; - cmd.increment = 1; -diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c -index 48441f0389ca1..0c8a571486d25 100644 ---- a/drivers/net/wireless/microchip/wilc1000/wlan.c -+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c -@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) - int ret = 0; - int counter; - int timeout; -- u32 vmm_table[WILC_VMM_TBL_SIZE]; -+ u32 *vmm_table = wilc->vmm_table; - u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; - const struct wilc_hif_func *func; - int srcu_idx; -@@ -1251,6 +1251,8 @@ void wilc_wlan_cleanup(struct net_device *dev) - while ((rqe = wilc_wlan_rxq_remove(wilc))) - kfree(rqe); - -+ kfree(wilc->vmm_table); -+ wilc->vmm_table = NULL; - kfree(wilc->rx_buffer); - wilc->rx_buffer = NULL; - kfree(wilc->tx_buffer); -@@ -1485,6 +1487,14 @@ int wilc_wlan_init(struct net_device *dev) - goto fail; - } - -+ if (!wilc->vmm_table) -+ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL); -+ -+ if (!wilc->vmm_table) { -+ ret = -ENOBUFS; -+ goto fail; -+ } -+ - if (!wilc->tx_buffer) - wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL); - -@@ -1509,7 +1519,8 @@ int wilc_wlan_init(struct net_device *dev) - return 0; - - fail: -- -+ kfree(wilc->vmm_table); -+ wilc->vmm_table = NULL; - kfree(wilc->rx_buffer); - wilc->rx_buffer = NULL; - kfree(wilc->tx_buffer); -diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c -index 990360d75cb64..e85b3c5d4acce 100644 ---- a/drivers/net/xen-netback/xenbus.c -+++ b/drivers/net/xen-netback/xenbus.c -@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be) - unsigned int queue_index; - - xen_unregister_watchers(vif); -- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); - #ifdef CONFIG_DEBUG_FS - xenvif_debugfs_delif(vif); - #endif /* CONFIG_DEBUG_FS */ -@@ -984,6 +983,7 @@ static int netback_remove(struct 
xenbus_device *dev) - struct backend_info *be = dev_get_drvdata(&dev->dev); - - unregister_hotplug_status_watch(be); -+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); - if (be->vif) { - kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); - backend_disconnect(be); -diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c -index 7a9e6ffa23429..daa0e160e1212 100644 ---- a/drivers/nvme/host/tcp.c -+++ b/drivers/nvme/host/tcp.c -@@ -121,7 +121,6 @@ struct nvme_tcp_queue { - struct mutex send_mutex; - struct llist_head req_list; - struct list_head send_list; -- bool more_requests; - - /* recv state */ - void *pdu; -@@ -318,7 +317,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) - static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) - { - return !list_empty(&queue->send_list) || -- !llist_empty(&queue->req_list) || queue->more_requests; -+ !llist_empty(&queue->req_list); - } - - static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, -@@ -337,9 +336,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, - */ - if (queue->io_cpu == raw_smp_processor_id() && - sync && empty && mutex_trylock(&queue->send_mutex)) { -- queue->more_requests = !last; - nvme_tcp_send_all(queue); -- queue->more_requests = false; - mutex_unlock(&queue->send_mutex); - } - -@@ -1227,7 +1224,7 @@ static void nvme_tcp_io_work(struct work_struct *w) - else if (unlikely(result < 0)) - return; - -- if (!pending) -+ if (!pending || !queue->rd_enabled) - return; - - } while (!time_after(jiffies, deadline)); /* quota is exhausted */ -diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c -index c27660a660d9a..a339719100051 100644 ---- a/drivers/nvme/target/core.c -+++ b/drivers/nvme/target/core.c -@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) - - static void __nvmet_req_complete(struct nvmet_req *req, u16 status) - { -+ struct nvmet_ns *ns = req->ns; -+ - if (!req->sq->sqhd_disabled) - nvmet_update_sq_head(req); - req->cqe->sq_id = cpu_to_le16(req->sq->qid); -@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) - - trace_nvmet_req_complete(req); - -- if (req->ns) -- nvmet_put_namespace(req->ns); - req->ops->queue_response(req); -+ if (ns) -+ nvmet_put_namespace(ns); - } - - void nvmet_req_complete(struct nvmet_req *req, u16 status) -diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c -index 82b61acf7a72b..1956be87ac5ff 100644 ---- a/drivers/nvme/target/zns.c -+++ b/drivers/nvme/target/zns.c -@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) - struct nvme_id_ns_zns *id_zns; - u64 zsze; - u16 status; -+ u32 mar, mor; - - if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { - req->error_loc = offsetof(struct nvme_identify, nsid); -@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) - zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> - req->ns->blksize_shift; - id_zns->lbafe[0].zsze = cpu_to_le64(zsze); -- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev)); -- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev)); -+ -+ mor = bdev_max_open_zones(req->ns->bdev); -+ if (!mor) -+ mor = U32_MAX; -+ else -+ mor--; -+ id_zns->mor = cpu_to_le32(mor); -+ -+ mar = bdev_max_active_zones(req->ns->bdev); -+ if (!mar) -+ mar = U32_MAX; -+ else -+ mar--; -+ id_zns->mar = cpu_to_le32(mar); - - done: - status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); -diff --git 
a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c -index 9be007c9420f9..f69ab90b5e22d 100644 ---- a/drivers/parisc/ccio-dma.c -+++ b/drivers/parisc/ccio-dma.c -@@ -1380,15 +1380,17 @@ ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr) - } - } - --static void __init ccio_init_resources(struct ioc *ioc) -+static int __init ccio_init_resources(struct ioc *ioc) - { - struct resource *res = ioc->mmio_region; - char *name = kmalloc(14, GFP_KERNEL); -- -+ if (unlikely(!name)) -+ return -ENOMEM; - snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path); - - ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low); - ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv); -+ return 0; - } - - static int new_ioc_area(struct resource *res, unsigned long size, -@@ -1543,7 +1545,10 @@ static int __init ccio_probe(struct parisc_device *dev) - return -ENOMEM; - } - ccio_ioc_init(ioc); -- ccio_init_resources(ioc); -+ if (ccio_init_resources(ioc)) { -+ kfree(ioc); -+ return -ENOMEM; -+ } - hppa_dma_ops = &ccio_ops; - - hba = kzalloc(sizeof(*hba), GFP_KERNEL); -diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c -index 231d86d3949c0..1ec5baa673f92 100644 ---- a/drivers/perf/riscv_pmu_sbi.c -+++ b/drivers/perf/riscv_pmu_sbi.c -@@ -467,7 +467,7 @@ static int pmu_sbi_get_ctrinfo(int nctr) - if (!pmu_ctr_list) - return -ENOMEM; - -- for (i = 0; i <= nctr; i++) { -+ for (i = 0; i < nctr; i++) { - ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0); - if (ret.error) - /* The logical counter ids are not expected to be contiguous */ -diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c -index 1e54a833f2cf0..a9daaf4d5aaab 100644 ---- a/drivers/regulator/core.c -+++ b/drivers/regulator/core.c -@@ -2732,13 +2732,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev) - */ - static int _regulator_handle_consumer_enable(struct regulator *regulator) - { -+ int ret; - struct regulator_dev *rdev = regulator->rdev; - - lockdep_assert_held_once(&rdev->mutex.base); - - regulator->enable_count++; -- if (regulator->uA_load && regulator->enable_count == 1) -- return drms_uA_update(rdev); -+ if (regulator->uA_load && regulator->enable_count == 1) { -+ ret = drms_uA_update(rdev); -+ if (ret) -+ regulator->enable_count--; -+ return ret; -+ } - - return 0; - } -diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c -index 750dd1e9f2cc7..2ddc431cbd337 100644 ---- a/drivers/scsi/lpfc/lpfc_init.c -+++ b/drivers/scsi/lpfc/lpfc_init.c -@@ -8061,7 +8061,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) - /* Allocate device driver memory */ - rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); - if (rc) -- return -ENOMEM; -+ goto out_destroy_workqueue; - - /* IF Type 2 ports get initialized now. 
*/ - if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= -@@ -8489,6 +8489,9 @@ out_free_bsmbx: - lpfc_destroy_bootstrap_mbox(phba); - out_free_mem: - lpfc_mem_free(phba); -+out_destroy_workqueue: -+ destroy_workqueue(phba->wq); -+ phba->wq = NULL; - return rc; - } - -diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c -index 5b5885d9732b6..3e9b2b0099c7a 100644 ---- a/drivers/scsi/megaraid/megaraid_sas_fusion.c -+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c -@@ -5311,7 +5311,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance) - if (!fusion->log_to_span) { - dev_err(&instance->pdev->dev, "Failed from %s %d\n", - __func__, __LINE__); -- kfree(instance->ctrl_context); - return -ENOMEM; - } - } -diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c -index 5e8887fa02c8a..e3b7ebf464244 100644 ---- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c -+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c -@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) - fw_event = list_first_entry(&ioc->fw_event_list, - struct fw_event_work, list); - list_del_init(&fw_event->list); -+ fw_event_work_put(fw_event); - } - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); - -@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) - if (cancel_work_sync(&fw_event->work)) - fw_event_work_put(fw_event); - -- fw_event_work_put(fw_event); - } - ioc->fw_events_cleanup = 0; - } -diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c -index 2b2f682883752..62666df1a59eb 100644 ---- a/drivers/scsi/qla2xxx/qla_target.c -+++ b/drivers/scsi/qla2xxx/qla_target.c -@@ -6935,14 +6935,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) - - if (ha->flags.msix_enabled) { - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { -- if (IS_QLA2071(ha)) { -- /* 4 ports Baker: Enable Interrupt Handshake */ -- icb->msix_atio = 0; -- icb->firmware_options_2 |= cpu_to_le32(BIT_26); -- } else { -- icb->msix_atio = cpu_to_le16(msix->entry); -- icb->firmware_options_2 &= cpu_to_le32(~BIT_26); -- } -+ icb->msix_atio = cpu_to_le16(msix->entry); -+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26); - ql_dbg(ql_dbg_init, vha, 0xf072, - "Registering ICB vector 0x%x for atio que.\n", - msix->entry); -diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c -index 78edb1ea4748d..f5c876d03c1ad 100644 ---- a/drivers/scsi/scsi_lib.c -+++ b/drivers/scsi/scsi_lib.c -@@ -118,7 +118,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason) - } - } - --static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) -+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) - { - struct request *rq = scsi_cmd_to_rq(cmd); - -@@ -128,7 +128,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) - } else { - WARN_ON_ONCE(true); - } -- blk_mq_requeue_request(rq, true); -+ -+ if (msecs) { -+ blk_mq_requeue_request(rq, false); -+ blk_mq_delay_kick_requeue_list(rq->q, msecs); -+ } else -+ blk_mq_requeue_request(rq, true); - } - - /** -@@ -658,14 +663,6 @@ static unsigned int scsi_rq_err_bytes(const struct request *rq) - return bytes; - } - --/* Helper for scsi_io_completion() when "reprep" action required. */ --static void scsi_io_completion_reprep(struct scsi_cmnd *cmd, -- struct request_queue *q) --{ -- /* A new command will be prepared and issued. 
*/ -- scsi_mq_requeue_cmd(cmd); --} -- - static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) - { - struct request *req = scsi_cmd_to_rq(cmd); -@@ -683,14 +680,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) - return false; - } - -+/* -+ * When ALUA transition state is returned, reprep the cmd to -+ * use the ALUA handler's transition timeout. Delay the reprep -+ * 1 sec to avoid aggressive retries of the target in that -+ * state. -+ */ -+#define ALUA_TRANSITION_REPREP_DELAY 1000 -+ - /* Helper for scsi_io_completion() when special action required. */ - static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) - { -- struct request_queue *q = cmd->device->request_queue; - struct request *req = scsi_cmd_to_rq(cmd); - int level = 0; -- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, -- ACTION_DELAYED_RETRY} action; -+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP, -+ ACTION_RETRY, ACTION_DELAYED_RETRY} action; - struct scsi_sense_hdr sshdr; - bool sense_valid; - bool sense_current = true; /* false implies "deferred sense" */ -@@ -779,8 +783,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) - action = ACTION_DELAYED_RETRY; - break; - case 0x0a: /* ALUA state transition */ -- blk_stat = BLK_STS_TRANSPORT; -- fallthrough; -+ action = ACTION_DELAYED_REPREP; -+ break; - default: - action = ACTION_FAIL; - break; -@@ -839,7 +843,10 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) - return; - fallthrough; - case ACTION_REPREP: -- scsi_io_completion_reprep(cmd, q); -+ scsi_mq_requeue_cmd(cmd, 0); -+ break; -+ case ACTION_DELAYED_REPREP: -+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY); - break; - case ACTION_RETRY: - /* Retry the same command immediately */ -@@ -933,7 +940,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, - * command block will be released and the queue function will be goosed. If we - * are not done then we have to figure out what to do next: - * -- * a) We can call scsi_io_completion_reprep(). The request will be -+ * a) We can call scsi_mq_requeue_cmd(). The request will be - * unprepared and put back on the queue. Then a new command will - * be created for it. This should be used if we made forward - * progress, or if we want to switch from READ(10) to READ(6) for -@@ -949,7 +956,6 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, - void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) - { - int result = cmd->result; -- struct request_queue *q = cmd->device->request_queue; - struct request *req = scsi_cmd_to_rq(cmd); - blk_status_t blk_stat = BLK_STS_OK; - -@@ -986,7 +992,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) - * request just queue the command up again. 
- */ - if (likely(result == 0)) -- scsi_io_completion_reprep(cmd, q); -+ scsi_mq_requeue_cmd(cmd, 0); - else - scsi_io_completion_action(cmd, result); - } -diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c -index 70ad0f3dce283..286f5d57c0cab 100644 ---- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c -+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c -@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - const struct of_device_id *of_id = NULL; - struct device_node *dn; - void __iomem *base; -- int ret, i; -+ int ret, i, s; - - /* AON ctrl registers */ - base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL); - if (IS_ERR(base)) { - pr_err("error mapping AON_CTRL\n"); -- return PTR_ERR(base); -+ ret = PTR_ERR(base); -+ goto aon_err; - } - ctrl.aon_ctrl_base = base; - -@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - /* Assume standard offset */ - ctrl.aon_sram = ctrl.aon_ctrl_base + - AON_CTRL_SYSTEM_DATA_RAM_OFS; -+ s = 0; - } else { - ctrl.aon_sram = base; -+ s = 1; - } - - writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC); -@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - (const void **)&ddr_phy_data); - if (IS_ERR(base)) { - pr_err("error mapping DDR PHY\n"); -- return PTR_ERR(base); -+ ret = PTR_ERR(base); -+ goto ddr_phy_err; - } - ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot; - ctrl.pll_status_offset = ddr_phy_data->pll_status_offset; -@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - for_each_matching_node(dn, ddr_shimphy_dt_ids) { - i = ctrl.num_memc; - if (i >= MAX_NUM_MEMC) { -+ of_node_put(dn); - pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC); - break; - } - - base = of_io_request_and_map(dn, 0, dn->full_name); - if (IS_ERR(base)) { -+ of_node_put(dn); - if (!ctrl.support_warm_boot) - break; - - pr_err("error mapping DDR SHIMPHY %d\n", i); -- return PTR_ERR(base); -+ ret = PTR_ERR(base); -+ goto ddr_shimphy_err; - } - ctrl.memcs[i].ddr_shimphy_base = base; - ctrl.num_memc++; -@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - for_each_matching_node(dn, brcmstb_memc_of_match) { - base = of_iomap(dn, 0); - if (!base) { -+ of_node_put(dn); - pr_err("error mapping DDR Sequencer %d\n", i); -- return -ENOMEM; -+ ret = -ENOMEM; -+ goto brcmstb_memc_err; - } - - of_id = of_match_node(brcmstb_memc_of_match, dn); - if (!of_id) { - iounmap(base); -- return -EINVAL; -+ of_node_put(dn); -+ ret = -EINVAL; -+ goto brcmstb_memc_err; - } - - ddr_seq_data = of_id->data; -@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - dn = of_find_matching_node(NULL, sram_dt_ids); - if (!dn) { - pr_err("SRAM not found\n"); -- return -EINVAL; -+ ret = -EINVAL; -+ goto brcmstb_memc_err; - } - - ret = brcmstb_init_sram(dn); - of_node_put(dn); - if (ret) { - pr_err("error setting up SRAM for PM\n"); -- return ret; -+ goto brcmstb_memc_err; - } - - ctrl.pdev = pdev; - - ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL); -- if (!ctrl.s3_params) -- return -ENOMEM; -+ if (!ctrl.s3_params) { -+ ret = -ENOMEM; -+ goto s3_params_err; -+ } - ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params, - sizeof(*ctrl.s3_params), - DMA_TO_DEVICE); -@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev) - - out: - kfree(ctrl.s3_params); -- -+s3_params_err: -+ iounmap(ctrl.boot_sram); -+brcmstb_memc_err: -+ for (i--; i >= 0; i--) -+ iounmap(ctrl.memcs[i].ddr_ctrl); 
-+ddr_shimphy_err: -+ for (i = 0; i < ctrl.num_memc; i++) -+ iounmap(ctrl.memcs[i].ddr_shimphy_base); -+ -+ iounmap(ctrl.memcs[0].ddr_phy_base); -+ddr_phy_err: -+ iounmap(ctrl.aon_ctrl_base); -+ if (s) -+ iounmap(ctrl.aon_sram); -+aon_err: - pr_warn("PM: initialization failed with code %d\n", ret); - - return ret; -diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig -index 07d52cafbb313..fcec6ed83d5e2 100644 ---- a/drivers/soc/fsl/Kconfig -+++ b/drivers/soc/fsl/Kconfig -@@ -24,6 +24,7 @@ config FSL_MC_DPIO - tristate "QorIQ DPAA2 DPIO driver" - depends on FSL_MC_BUS - select SOC_BUS -+ select FSL_GUTS - select DIMLIB - help - Driver for the DPAA2 DPIO object. A DPIO provides queue and -diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c -index 85aa86e1338af..5a3809f6a698f 100644 ---- a/drivers/soc/imx/gpcv2.c -+++ b/drivers/soc/imx/gpcv2.c -@@ -333,6 +333,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd) - } - } - -+ reset_control_assert(domain->reset); -+ - /* Enable reset clocks for all devices in the domain */ - ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks); - if (ret) { -@@ -340,7 +342,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd) - goto out_regulator_disable; - } - -- reset_control_assert(domain->reset); -+ /* delays for reset to propagate */ -+ udelay(5); - - if (domain->bits.pxx) { - /* request the domain to power up */ -diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c -index 7ebc28709e945..2782a7e0a8719 100644 ---- a/drivers/soc/imx/imx8m-blk-ctrl.c -+++ b/drivers/soc/imx/imx8m-blk-ctrl.c -@@ -242,7 +242,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev) - ret = PTR_ERR(domain->power_dev); - goto cleanup_pds; - } -- dev_set_name(domain->power_dev, "%s", data->name); - - domain->genpd.name = data->name; - domain->genpd.power_on = imx8m_blk_ctrl_power_on; -diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h -index 267342dfa7388..2dcbe166df63e 100644 ---- a/drivers/spi/spi-bitbang-txrx.h -+++ b/drivers/spi/spi-bitbang-txrx.h -@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi, - { - /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */ - -+ u8 rxbit = bits - 1; - u32 oldbit = !(word & 1); - /* clock starts at inactive polarity */ - for (; likely(bits); bits--) { -@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi, - /* sample LSB (from slave) on leading edge */ - word >>= 1; - if ((flags & SPI_MASTER_NO_RX) == 0) -- word |= getmiso(spi) << (bits - 1); -+ word |= getmiso(spi) << rxbit; - setsck(spi, cpol); - } - return word; -@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi, - { - /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */ - -+ u8 rxbit = bits - 1; - u32 oldbit = !(word & 1); - /* clock starts at inactive polarity */ - for (; likely(bits); bits--) { -@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi, - /* sample LSB (from slave) on trailing edge */ - word >>= 1; - if ((flags & SPI_MASTER_NO_RX) == 0) -- word |= getmiso(spi) << (bits - 1); -+ word |= getmiso(spi) << rxbit; - } - return word; - } -diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c -index 1175f3a46859f..27295bda3e0bd 100644 ---- a/drivers/tee/tee_shm.c -+++ b/drivers/tee/tee_shm.c -@@ -9,6 +9,7 @@ - #include - #include - #include -+#include - #include - #include "tee_private.h" - -diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c 
b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c -index 80d4e0676083a..365489bf4b8c1 100644 ---- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c -+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c -@@ -527,7 +527,7 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv) - priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, - obj->package.elements[0].buffer.length, - GFP_KERNEL); -- if (!priv->data_vault) -+ if (ZERO_OR_NULL_PTR(priv->data_vault)) - goto out_free; - - bin_attr_data_vault.private = priv->data_vault; -@@ -597,7 +597,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) - goto free_imok; - } - -- if (priv->data_vault) { -+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) { - result = sysfs_create_group(&pdev->dev.kobj, - &data_attribute_group); - if (result) -@@ -615,7 +615,8 @@ static int int3400_thermal_probe(struct platform_device *pdev) - free_sysfs: - cleanup_odvp(priv); - if (priv->data_vault) { -- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); -+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) -+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); - kfree(priv->data_vault); - } - free_uuid: -@@ -647,7 +648,7 @@ static int int3400_thermal_remove(struct platform_device *pdev) - if (!priv->rel_misc_dev_res) - acpi_thermal_rel_misc_device_remove(priv->adev->handle); - -- if (priv->data_vault) -+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) - sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group); - sysfs_remove_group(&pdev->dev.kobj, &uuid_attribute_group); - sysfs_remove_group(&pdev->dev.kobj, &imok_attribute_group); -diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c -index a51ca56a0ebe7..829da9cb14a86 100644 ---- a/drivers/ufs/core/ufshcd.c -+++ b/drivers/ufs/core/ufshcd.c -@@ -8723,6 +8723,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, - struct scsi_device *sdp; - unsigned long flags; - int ret, retries; -+ unsigned long deadline; -+ int32_t remaining; - - spin_lock_irqsave(hba->host->host_lock, flags); - sdp = hba->ufs_device_wlun; -@@ -8755,9 +8757,14 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, - * callbacks hence set the RQF_PM flag so that it doesn't resume the - * already suspended childs. - */ -+ deadline = jiffies + 10 * HZ; - for (retries = 3; retries > 0; --retries) { -+ ret = -ETIMEDOUT; -+ remaining = deadline - jiffies; -+ if (remaining <= 0) -+ break; - ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr, -- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL); -+ remaining / HZ, 0, 0, RQF_PM, NULL); - if (!scsi_status_is_check_condition(ret) || - !scsi_sense_valid(&sshdr) || - sshdr.sense_key != UNIT_ATTENTION) -diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c -index c13b9290e3575..d0057d18d2f4a 100644 ---- a/drivers/vfio/vfio_iommu_type1.c -+++ b/drivers/vfio/vfio_iommu_type1.c -@@ -557,6 +557,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, - ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, - pages, NULL, NULL); - if (ret > 0) { -+ int i; -+ -+ /* -+ * The zero page is always resident, we don't need to pin it -+ * and it falls into our invalid/reserved test so we don't -+ * unpin in put_pfn(). Unpin all zero pages in the batch here. 
-+ */ -+ for (i = 0 ; i < ret; i++) { -+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i])))) -+ unpin_user_page(pages[i]); -+ } -+ - *pfn = page_to_pfn(pages[0]); - goto done; - } -diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c -index 393894af26f84..2b00a9d554fc0 100644 ---- a/drivers/video/fbdev/chipsfb.c -+++ b/drivers/video/fbdev/chipsfb.c -@@ -430,6 +430,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) - err_release_fb: - framebuffer_release(p); - err_disable: -+ pci_disable_device(dp); - err_out: - return rc; - } -diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c -index c2a60b187467e..4d7f63892dcc4 100644 ---- a/drivers/video/fbdev/core/fbsysfs.c -+++ b/drivers/video/fbdev/core/fbsysfs.c -@@ -84,6 +84,10 @@ void framebuffer_release(struct fb_info *info) - if (WARN_ON(refcount_read(&info->count))) - return; - -+#if IS_ENABLED(CONFIG_FB_BACKLIGHT) -+ mutex_destroy(&info->bl_curve_mutex); -+#endif -+ - kfree(info->apertures); - kfree(info); - } -diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c -index 292fcb0a24fc9..6ff237cee7f87 100644 ---- a/drivers/video/fbdev/omap/omapfb_main.c -+++ b/drivers/video/fbdev/omap/omapfb_main.c -@@ -1643,14 +1643,14 @@ static int omapfb_do_probe(struct platform_device *pdev, - goto cleanup; - } - fbdev->int_irq = platform_get_irq(pdev, 0); -- if (!fbdev->int_irq) { -+ if (fbdev->int_irq < 0) { - dev_err(&pdev->dev, "unable to get irq\n"); - r = ENXIO; - goto cleanup; - } - - fbdev->ext_irq = platform_get_irq(pdev, 1); -- if (!fbdev->ext_irq) { -+ if (fbdev->ext_irq < 0) { - dev_err(&pdev->dev, "unable to get irq\n"); - r = ENXIO; - goto cleanup; -diff --git a/fs/afs/flock.c b/fs/afs/flock.c -index c4210a3964d8b..bbcc5afd15760 100644 ---- a/fs/afs/flock.c -+++ b/fs/afs/flock.c -@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call) - if (call->error == 0) { - spin_lock(&vnode->lock); - trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0); -- vnode->locked_at = call->reply_time; -+ vnode->locked_at = call->issue_time; - afs_schedule_lock_extension(vnode); - spin_unlock(&vnode->lock); - } -diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c -index 4943413d9c5f7..7d37f63ef0f09 100644 ---- a/fs/afs/fsclient.c -+++ b/fs/afs/fsclient.c -@@ -131,7 +131,7 @@ bad: - - static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) - { -- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry; -+ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; - } - - static void xdr_decode_AFSCallBack(const __be32 **_bp, -diff --git a/fs/afs/internal.h b/fs/afs/internal.h -index a6f25d9e75b52..28bdd0387e5ea 100644 ---- a/fs/afs/internal.h -+++ b/fs/afs/internal.h -@@ -137,7 +137,6 @@ struct afs_call { - bool need_attention; /* T if RxRPC poked us */ - bool async; /* T if asynchronous */ - bool upgrade; /* T to request service upgrade */ -- bool have_reply_time; /* T if have got reply_time */ - bool intr; /* T if interruptible */ - bool unmarshalling_error; /* T if an unmarshalling error occurred */ - u16 service_id; /* Actual service ID (after upgrade) */ -@@ -151,7 +150,7 @@ struct afs_call { - } __attribute__((packed)); - __be64 tmp64; - }; -- ktime_t reply_time; /* Time of first reply packet */ -+ ktime_t issue_time; /* Time of issue of operation */ - }; - - struct afs_call_type { -diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c -index a5434f3e57c68..e3de7fea36435 100644 ---- a/fs/afs/rxrpc.c -+++ 
b/fs/afs/rxrpc.c -@@ -347,6 +347,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) - if (call->max_lifespan) - rxrpc_kernel_set_max_life(call->net->socket, rxcall, - call->max_lifespan); -+ call->issue_time = ktime_get_real(); - - /* send the request */ - iov[0].iov_base = call->request; -@@ -497,12 +498,6 @@ static void afs_deliver_to_call(struct afs_call *call) - return; - } - -- if (!call->have_reply_time && -- rxrpc_kernel_get_reply_time(call->net->socket, -- call->rxcall, -- &call->reply_time)) -- call->have_reply_time = true; -- - ret = call->type->deliver(call); - state = READ_ONCE(call->state); - if (ret == 0 && call->unmarshalling_error) -diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c -index fdc7d675b4b0c..11571cca86c19 100644 ---- a/fs/afs/yfsclient.c -+++ b/fs/afs/yfsclient.c -@@ -232,8 +232,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp, - struct afs_callback *cb = &scb->callback; - ktime_t cb_expiry; - -- cb_expiry = call->reply_time; -- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100); -+ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); - cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC); - scb->have_cb = true; - *_bp += xdr_size(x); -diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h -index 4d8acd7e63eb5..1bbc810574f22 100644 ---- a/fs/btrfs/ctree.h -+++ b/fs/btrfs/ctree.h -@@ -1065,8 +1065,6 @@ struct btrfs_fs_info { - - spinlock_t zone_active_bgs_lock; - struct list_head zone_active_bgs; -- /* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */ -- wait_queue_head_t zone_finish_wait; - - #ifdef CONFIG_BTRFS_FS_REF_VERIFY - spinlock_t ref_verify_lock; -diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c -index a2505cfc6bc10..781952c5a5c23 100644 ---- a/fs/btrfs/disk-io.c -+++ b/fs/btrfs/disk-io.c -@@ -3173,7 +3173,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) - init_waitqueue_head(&fs_info->transaction_blocked_wait); - init_waitqueue_head(&fs_info->async_submit_wait); - init_waitqueue_head(&fs_info->delayed_iputs_wait); -- init_waitqueue_head(&fs_info->zone_finish_wait); - - /* Usable values until the real ones are cached from the superblock */ - fs_info->nodesize = 4096; -diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c -index 61496ecb1e201..f79f8d7cffcf2 100644 ---- a/fs/btrfs/inode.c -+++ b/fs/btrfs/inode.c -@@ -1643,10 +1643,9 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode, - done_offset = end; - - if (done_offset == start) { -- struct btrfs_fs_info *info = inode->root->fs_info; -- -- wait_var_event(&info->zone_finish_wait, -- !test_bit(BTRFS_FS_NEED_ZONE_FINISH, &info->flags)); -+ wait_on_bit_io(&inode->root->fs_info->flags, -+ BTRFS_FS_NEED_ZONE_FINISH, -+ TASK_UNINTERRUPTIBLE); - continue; - } - -diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c -index b0c5b4738b1f7..17623e6410c5d 100644 ---- a/fs/btrfs/space-info.c -+++ b/fs/btrfs/space-info.c -@@ -199,7 +199,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags) - ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK); - - if (flags & BTRFS_BLOCK_GROUP_DATA) -- return SZ_1G; -+ return BTRFS_MAX_DATA_CHUNK_SIZE; - else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) - return SZ_32M; - -diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c -index 3460fd6743807..16e01fbdcec83 100644 ---- a/fs/btrfs/volumes.c -+++ b/fs/btrfs/volumes.c -@@ -5266,6 +5266,9 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl, - ctl->stripe_size); - } - -+ /* Stripe size 
should not go beyond 1G. */ -+ ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G); -+ - /* Align to BTRFS_STRIPE_LEN */ - ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN); - ctl->chunk_size = ctl->stripe_size * data_stripes; -diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c -index 31cb11daa8e82..1386362fad3b8 100644 ---- a/fs/btrfs/zoned.c -+++ b/fs/btrfs/zoned.c -@@ -421,10 +421,19 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) - * since btrfs adds the pages one by one to a bio, and btrfs cannot - * increase the metadata reservation even if it increases the number of - * extents, it is safe to stick with the limit. -+ * -+ * With the zoned emulation, we can have non-zoned device on the zoned -+ * mode. In this case, we don't have a valid max zone append size. So, -+ * use max_segments * PAGE_SIZE as the pseudo max_zone_append_size. - */ -- zone_info->max_zone_append_size = -- min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT, -- (u64)bdev_max_segments(bdev) << PAGE_SHIFT); -+ if (bdev_is_zoned(bdev)) { -+ zone_info->max_zone_append_size = min_t(u64, -+ (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT, -+ (u64)bdev_max_segments(bdev) << PAGE_SHIFT); -+ } else { -+ zone_info->max_zone_append_size = -+ (u64)bdev_max_segments(bdev) << PAGE_SHIFT; -+ } - if (!IS_ALIGNED(nr_sectors, zone_sectors)) - zone_info->nr_zones++; - -@@ -1178,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size) - * offset. - */ - static int calculate_alloc_pointer(struct btrfs_block_group *cache, -- u64 *offset_ret) -+ u64 *offset_ret, bool new) - { - struct btrfs_fs_info *fs_info = cache->fs_info; - struct btrfs_root *root; -@@ -1188,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache, - int ret; - u64 length; - -+ /* -+ * Avoid tree lookups for a new block group, there's no use for it. -+ * It must always be 0. -+ * -+ * Also, we have a lock chain of extent buffer lock -> chunk mutex. -+ * For new a block group, this function is called from -+ * btrfs_make_block_group() which is already taking the chunk mutex. -+ * Thus, we cannot call calculate_alloc_pointer() which takes extent -+ * buffer locks to avoid deadlock. -+ */ -+ if (new) { -+ *offset_ret = 0; -+ return 0; -+ } -+ - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; -@@ -1323,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) - else - num_conventional++; - -+ /* -+ * Consider a zone as active if we can allow any number of -+ * active zones. -+ */ -+ if (!device->zone_info->max_active_zones) -+ __set_bit(i, active); -+ - if (!is_sequential) { - alloc_offsets[i] = WP_CONVENTIONAL; - continue; -@@ -1389,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) - __set_bit(i, active); - break; - } -- -- /* -- * Consider a zone as active if we can allow any number of -- * active zones. -- */ -- if (!device->zone_info->max_active_zones) -- __set_bit(i, active); - } - - if (num_sequential > 0) - cache->seq_zone = true; - - if (num_conventional > 0) { -- /* -- * Avoid calling calculate_alloc_pointer() for new BG. It -- * is no use for new BG. It must be always 0. -- * -- * Also, we have a lock chain of extent buffer lock -> -- * chunk mutex. For new BG, this function is called from -- * btrfs_make_block_group() which is already taking the -- * chunk mutex. 
Thus, we cannot call -- * calculate_alloc_pointer() which takes extent buffer -- * locks to avoid deadlock. -- */ -- - /* Zone capacity is always zone size in emulation */ - cache->zone_capacity = cache->length; -- if (new) { -- cache->alloc_offset = 0; -- goto out; -- } -- ret = calculate_alloc_pointer(cache, &last_alloc); -- if (ret || map->num_stripes == num_conventional) { -- if (!ret) -- cache->alloc_offset = last_alloc; -- else -- btrfs_err(fs_info, -+ ret = calculate_alloc_pointer(cache, &last_alloc, new); -+ if (ret) { -+ btrfs_err(fs_info, - "zoned: failed to determine allocation offset of bg %llu", -- cache->start); -+ cache->start); -+ goto out; -+ } else if (map->num_stripes == num_conventional) { -+ cache->alloc_offset = last_alloc; -+ cache->zone_is_active = 1; - goto out; - } - } -@@ -1495,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) - goto out; - } - -- if (cache->zone_is_active) { -- btrfs_get_block_group(cache); -- spin_lock(&fs_info->zone_active_bgs_lock); -- list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs); -- spin_unlock(&fs_info->zone_active_bgs_lock); -- } -- - out: - if (cache->alloc_offset > fs_info->zone_size) { - btrfs_err(fs_info, -@@ -1526,10 +1528,16 @@ out: - ret = -EIO; - } - -- if (!ret) -+ if (!ret) { - cache->meta_write_pointer = cache->alloc_offset + cache->start; -- -- if (ret) { -+ if (cache->zone_is_active) { -+ btrfs_get_block_group(cache); -+ spin_lock(&fs_info->zone_active_bgs_lock); -+ list_add_tail(&cache->active_bg_list, -+ &fs_info->zone_active_bgs); -+ spin_unlock(&fs_info->zone_active_bgs_lock); -+ } -+ } else { - kfree(cache->physical_map); - cache->physical_map = NULL; - } -@@ -2007,8 +2015,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ - /* For active_bg_list */ - btrfs_put_block_group(block_group); - -- clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags); -- wake_up_all(&fs_info->zone_finish_wait); -+ clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags); - - return 0; - } -diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c -index f5dcc4940b6da..9dfd2dd612c25 100644 ---- a/fs/cifs/smb2file.c -+++ b/fs/cifs/smb2file.c -@@ -61,7 +61,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, - nr_ioctl_req.Reserved = 0; - rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, - fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, -- true /* is_fsctl */, - (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), - CIFSMaxBufSize, NULL, NULL /* no return info */); - if (rc == -EOPNOTSUPP) { -diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c -index 3898ec2632dc4..e8a8daa82ed76 100644 ---- a/fs/cifs/smb2ops.c -+++ b/fs/cifs/smb2ops.c -@@ -680,7 +680,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) - struct cifs_ses *ses = tcon->ses; - - rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, -- FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, -+ FSCTL_QUERY_NETWORK_INTERFACE_INFO, - NULL /* no data input */, 0 /* no data input */, - CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); - if (rc == -EOPNOTSUPP) { -@@ -1609,9 +1609,8 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon, - struct resume_key_req *res_key; - - rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, -- FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, -- NULL, 0 /* no input */, CIFSMaxBufSize, -- (char **)&res_key, &ret_data_len); -+ FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */, -+ 
CIFSMaxBufSize, (char **)&res_key, &ret_data_len); - - if (rc == -EOPNOTSUPP) { - pr_warn_once("Server share %s does not support copy range\n", tcon->treeName); -@@ -1753,7 +1752,7 @@ smb2_ioctl_query_info(const unsigned int xid, - rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; - - rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, -- qi.info_type, true, buffer, qi.output_buffer_length, -+ qi.info_type, buffer, qi.output_buffer_length, - CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - - MAX_SMB2_CLOSE_RESPONSE_SIZE); - free_req1_func = SMB2_ioctl_free; -@@ -1929,9 +1928,8 @@ smb2_copychunk_range(const unsigned int xid, - retbuf = NULL; - rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, - trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, -- true /* is_fsctl */, (char *)pcchunk, -- sizeof(struct copychunk_ioctl), CIFSMaxBufSize, -- (char **)&retbuf, &ret_data_len); -+ (char *)pcchunk, sizeof(struct copychunk_ioctl), -+ CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); - if (rc == 0) { - if (ret_data_len != - sizeof(struct copychunk_ioctl_rsp)) { -@@ -2091,7 +2089,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon, - - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, FSCTL_SET_SPARSE, -- true /* is_fctl */, - &setsparse, 1, CIFSMaxBufSize, NULL, NULL); - if (rc) { - tcon->broken_sparse_sup = true; -@@ -2174,7 +2171,6 @@ smb2_duplicate_extents(const unsigned int xid, - rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, - trgtfile->fid.volatile_fid, - FSCTL_DUPLICATE_EXTENTS_TO_FILE, -- true /* is_fsctl */, - (char *)&dup_ext_buf, - sizeof(struct duplicate_extents_to_file), - CIFSMaxBufSize, NULL, -@@ -2209,7 +2205,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, - return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, - FSCTL_SET_INTEGRITY_INFORMATION, -- true /* is_fsctl */, - (char *)&integr_info, - sizeof(struct fsctl_set_integrity_information_req), - CIFSMaxBufSize, NULL, -@@ -2262,7 +2257,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon, - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, - FSCTL_SRV_ENUMERATE_SNAPSHOTS, -- true /* is_fsctl */, - NULL, 0 /* no input data */, max_response_size, - (char **)&retbuf, - &ret_data_len); -@@ -2982,7 +2976,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, - do { - rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_DFS_GET_REFERRALS, -- true /* is_fsctl */, - (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, - (char **)&dfs_rsp, &dfs_rsp_size); - if (!is_retryable_error(rc)) -@@ -3189,8 +3182,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, - - rc = SMB2_ioctl_init(tcon, server, - &rqst[1], fid.persistent_fid, -- fid.volatile_fid, FSCTL_GET_REPARSE_POINT, -- true /* is_fctl */, NULL, 0, -+ fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0, - CIFSMaxBufSize - - MAX_SMB2_CREATE_RESPONSE_SIZE - - MAX_SMB2_CLOSE_RESPONSE_SIZE); -@@ -3370,8 +3362,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon, - - rc = SMB2_ioctl_init(tcon, server, - &rqst[1], COMPOUND_FID, -- COMPOUND_FID, FSCTL_GET_REPARSE_POINT, -- true /* is_fctl */, NULL, 0, -+ COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0, - CIFSMaxBufSize - - MAX_SMB2_CREATE_RESPONSE_SIZE - - MAX_SMB2_CLOSE_RESPONSE_SIZE); -@@ -3599,26 +3590,43 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb, - return pntsd; - } - -+static long 
smb3_zero_data(struct file *file, struct cifs_tcon *tcon, -+ loff_t offset, loff_t len, unsigned int xid) -+{ -+ struct cifsFileInfo *cfile = file->private_data; -+ struct file_zero_data_information fsctl_buf; -+ -+ cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); -+ -+ fsctl_buf.FileOffset = cpu_to_le64(offset); -+ fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); -+ -+ return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, -+ cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, -+ (char *)&fsctl_buf, -+ sizeof(struct file_zero_data_information), -+ 0, NULL, NULL); -+} -+ - static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, - loff_t offset, loff_t len, bool keep_size) - { - struct cifs_ses *ses = tcon->ses; -- struct inode *inode; -- struct cifsInodeInfo *cifsi; -+ struct inode *inode = file_inode(file); -+ struct cifsInodeInfo *cifsi = CIFS_I(inode); - struct cifsFileInfo *cfile = file->private_data; -- struct file_zero_data_information fsctl_buf; - long rc; - unsigned int xid; - __le64 eof; - - xid = get_xid(); - -- inode = d_inode(cfile->dentry); -- cifsi = CIFS_I(inode); -- - trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, - ses->Suid, offset, len); - -+ inode_lock(inode); -+ filemap_invalidate_lock(inode->i_mapping); -+ - /* - * We zero the range through ioctl, so we need remove the page caches - * first, otherwise the data may be inconsistent with the server. -@@ -3626,26 +3634,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, - truncate_pagecache_range(inode, offset, offset + len - 1); - - /* if file not oplocked can't be sure whether asking to extend size */ -- if (!CIFS_CACHE_READ(cifsi)) -- if (keep_size == false) { -- rc = -EOPNOTSUPP; -- trace_smb3_zero_err(xid, cfile->fid.persistent_fid, -- tcon->tid, ses->Suid, offset, len, rc); -- free_xid(xid); -- return rc; -- } -- -- cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); -- -- fsctl_buf.FileOffset = cpu_to_le64(offset); -- fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); -+ rc = -EOPNOTSUPP; -+ if (keep_size == false && !CIFS_CACHE_READ(cifsi)) -+ goto zero_range_exit; - -- rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, -- cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true, -- (char *)&fsctl_buf, -- sizeof(struct file_zero_data_information), -- 0, NULL, NULL); -- if (rc) -+ rc = smb3_zero_data(file, tcon, offset, len, xid); -+ if (rc < 0) - goto zero_range_exit; - - /* -@@ -3658,6 +3652,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, - } - - zero_range_exit: -+ filemap_invalidate_unlock(inode->i_mapping); -+ inode_unlock(inode); - free_xid(xid); - if (rc) - trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid, -@@ -3702,7 +3698,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, - - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, -- true /* is_fctl */, (char *)&fsctl_buf, -+ (char *)&fsctl_buf, - sizeof(struct file_zero_data_information), - CIFSMaxBufSize, NULL, NULL); - filemap_invalidate_unlock(inode->i_mapping); -@@ -3764,7 +3760,7 @@ static int smb3_simple_fallocate_range(unsigned int xid, - in_data.length = cpu_to_le64(len); - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, -- FSCTL_QUERY_ALLOCATED_RANGES, true, -+ FSCTL_QUERY_ALLOCATED_RANGES, - (char *)&in_data, sizeof(in_data), - 1024 * sizeof(struct file_allocated_range_buffer), - (char **)&out_data, &out_data_len); -@@ -4085,7 +4081,7 @@ 
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs - - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, -- FSCTL_QUERY_ALLOCATED_RANGES, true, -+ FSCTL_QUERY_ALLOCATED_RANGES, - (char *)&in_data, sizeof(in_data), - sizeof(struct file_allocated_range_buffer), - (char **)&out_data, &out_data_len); -@@ -4145,7 +4141,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon, - - rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, -- FSCTL_QUERY_ALLOCATED_RANGES, true, -+ FSCTL_QUERY_ALLOCATED_RANGES, - (char *)&in_data, sizeof(in_data), - 1024 * sizeof(struct file_allocated_range_buffer), - (char **)&out_data, &out_data_len); -diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c -index ba58d7fd54f9e..31d37afae741f 100644 ---- a/fs/cifs/smb2pdu.c -+++ b/fs/cifs/smb2pdu.c -@@ -1174,7 +1174,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) - } - - rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, -- FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, -+ FSCTL_VALIDATE_NEGOTIATE_INFO, - (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, - (char **)&pneg_rsp, &rsplen); - if (rc == -EOPNOTSUPP) { -@@ -3053,7 +3053,7 @@ int - SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, - struct smb_rqst *rqst, - u64 persistent_fid, u64 volatile_fid, u32 opcode, -- bool is_fsctl, char *in_data, u32 indatalen, -+ char *in_data, u32 indatalen, - __u32 max_response_size) - { - struct smb2_ioctl_req *req; -@@ -3128,10 +3128,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, - req->hdr.CreditCharge = - cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), - SMB2_MAX_BUFFER_SIZE)); -- if (is_fsctl) -- req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); -- else -- req->Flags = 0; -+ /* always an FSCTL (for now) */ -+ req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); - - /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ - if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) -@@ -3158,9 +3156,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst) - */ - int - SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, -- u64 volatile_fid, u32 opcode, bool is_fsctl, -- char *in_data, u32 indatalen, u32 max_out_data_len, -- char **out_data, u32 *plen /* returned data len */) -+ u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, -+ u32 max_out_data_len, char **out_data, -+ u32 *plen /* returned data len */) - { - struct smb_rqst rqst; - struct smb2_ioctl_rsp *rsp = NULL; -@@ -3202,7 +3200,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - - rc = SMB2_ioctl_init(tcon, server, - &rqst, persistent_fid, volatile_fid, opcode, -- is_fsctl, in_data, indatalen, max_out_data_len); -+ in_data, indatalen, max_out_data_len); - if (rc) - goto ioctl_exit; - -@@ -3294,7 +3292,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, - cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); - - rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, -- FSCTL_SET_COMPRESSION, true /* is_fsctl */, -+ FSCTL_SET_COMPRESSION, - (char *)&fsctl_input /* data input */, - 2 /* in data len */, CIFSMaxBufSize /* max out data */, - &ret_data /* out data */, NULL); -diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h -index a69f1eed1cfe5..d57d7202dc367 100644 ---- a/fs/cifs/smb2proto.h -+++ b/fs/cifs/smb2proto.h -@@ -147,13 +147,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, - extern void SMB2_open_free(struct smb_rqst 
*rqst); - extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, - u64 persistent_fid, u64 volatile_fid, u32 opcode, -- bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, -+ char *in_data, u32 indatalen, u32 maxoutlen, - char **out_data, u32 *plen /* returned data len */); - extern int SMB2_ioctl_init(struct cifs_tcon *tcon, - struct TCP_Server_Info *server, - struct smb_rqst *rqst, - u64 persistent_fid, u64 volatile_fid, u32 opcode, -- bool is_fsctl, char *in_data, u32 indatalen, -+ char *in_data, u32 indatalen, - __u32 max_response_size); - extern void SMB2_ioctl_free(struct smb_rqst *rqst); - extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, -diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c -index 3dcf0b8b4e932..232cfdf095aeb 100644 ---- a/fs/debugfs/inode.c -+++ b/fs/debugfs/inode.c -@@ -744,6 +744,28 @@ void debugfs_remove(struct dentry *dentry) - } - EXPORT_SYMBOL_GPL(debugfs_remove); - -+/** -+ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it -+ * @name: a pointer to a string containing the name of the item to look up. -+ * @parent: a pointer to the parent dentry of the item. -+ * -+ * This is the equlivant of doing something like -+ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting -+ * handled for the directory being looked up. -+ */ -+void debugfs_lookup_and_remove(const char *name, struct dentry *parent) -+{ -+ struct dentry *dentry; -+ -+ dentry = debugfs_lookup(name, parent); -+ if (!dentry) -+ return; -+ -+ debugfs_remove(dentry); -+ dput(dentry); -+} -+EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove); -+ - /** - * debugfs_rename - rename a file/directory in the debugfs filesystem - * @old_dir: a pointer to the parent dentry for the renamed object. This -diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c -index 8e01d89c3319e..b5fd9d71e67f1 100644 ---- a/fs/erofs/fscache.c -+++ b/fs/erofs/fscache.c -@@ -222,8 +222,10 @@ static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) - - rreq = erofs_fscache_alloc_request(folio_mapping(folio), - folio_pos(folio), folio_size(folio)); -- if (IS_ERR(rreq)) -+ if (IS_ERR(rreq)) { -+ ret = PTR_ERR(rreq); - goto out; -+ } - - return erofs_fscache_read_folios_async(mdev.m_fscache->cookie, - rreq, mdev.m_pa); -@@ -301,8 +303,10 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio) - - rreq = erofs_fscache_alloc_request(folio_mapping(folio), - folio_pos(folio), folio_size(folio)); -- if (IS_ERR(rreq)) -+ if (IS_ERR(rreq)) { -+ ret = PTR_ERR(rreq); - goto out_unlock; -+ } - - pstart = mdev.m_pa + (pos - map.m_la); - return erofs_fscache_read_folios_async(mdev.m_fscache->cookie, -diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h -index cfee49d33b95a..a01cc82795a25 100644 ---- a/fs/erofs/internal.h -+++ b/fs/erofs/internal.h -@@ -195,7 +195,6 @@ struct erofs_workgroup { - atomic_t refcount; - }; - --#if defined(CONFIG_SMP) - static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) - { -@@ -224,34 +223,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) - return atomic_cond_read_relaxed(&grp->refcount, - VAL != EROFS_LOCKED_MAGIC); - } --#else --static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, -- int val) --{ -- preempt_disable(); -- /* no need to spin on UP platforms, let's just disable preemption. 
*/ -- if (val != atomic_read(&grp->refcount)) { -- preempt_enable(); -- return false; -- } -- return true; --} -- --static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, -- int orig_val) --{ -- preempt_enable(); --} -- --static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) --{ -- int v = atomic_read(&grp->refcount); -- -- /* workgroup is never freezed on uniprocessor systems */ -- DBG_BUGON(v == EROFS_LOCKED_MAGIC); -- return v; --} --#endif /* !CONFIG_SMP */ - #endif /* !CONFIG_EROFS_FS_ZIP */ - - /* we strictly follow PAGE_SIZE and no buffer head yet */ -diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c -index 81d26abf486fa..da85b39791957 100644 ---- a/fs/tracefs/inode.c -+++ b/fs/tracefs/inode.c -@@ -141,6 +141,8 @@ struct tracefs_mount_opts { - kuid_t uid; - kgid_t gid; - umode_t mode; -+ /* Opt_* bitfield. */ -+ unsigned int opts; - }; - - enum { -@@ -241,6 +243,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) - kgid_t gid; - char *p; - -+ opts->opts = 0; - opts->mode = TRACEFS_DEFAULT_MODE; - - while ((p = strsep(&data, ",")) != NULL) { -@@ -275,24 +278,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) - * but traditionally tracefs has ignored all mount options - */ - } -+ -+ opts->opts |= BIT(token); - } - - return 0; - } - --static int tracefs_apply_options(struct super_block *sb) -+static int tracefs_apply_options(struct super_block *sb, bool remount) - { - struct tracefs_fs_info *fsi = sb->s_fs_info; - struct inode *inode = d_inode(sb->s_root); - struct tracefs_mount_opts *opts = &fsi->mount_opts; - -- inode->i_mode &= ~S_IALLUGO; -- inode->i_mode |= opts->mode; -+ /* -+ * On remount, only reset mode/uid/gid if they were provided as mount -+ * options. -+ */ -+ -+ if (!remount || opts->opts & BIT(Opt_mode)) { -+ inode->i_mode &= ~S_IALLUGO; -+ inode->i_mode |= opts->mode; -+ } - -- inode->i_uid = opts->uid; -+ if (!remount || opts->opts & BIT(Opt_uid)) -+ inode->i_uid = opts->uid; - -- /* Set all the group ids to the mount option */ -- set_gid(sb->s_root, opts->gid); -+ if (!remount || opts->opts & BIT(Opt_gid)) { -+ /* Set all the group ids to the mount option */ -+ set_gid(sb->s_root, opts->gid); -+ } - - return 0; - } -@@ -307,7 +322,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data) - if (err) - goto fail; - -- tracefs_apply_options(sb); -+ tracefs_apply_options(sb, true); - - fail: - return err; -@@ -359,7 +374,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent) - - sb->s_op = &tracefs_super_operations; - -- tracefs_apply_options(sb); -+ tracefs_apply_options(sb, false); - - return 0; - -diff --git a/include/kunit/test.h b/include/kunit/test.h -index 8ffcd7de96070..648dbb00a3008 100644 ---- a/include/kunit/test.h -+++ b/include/kunit/test.h -@@ -863,7 +863,7 @@ do { \ - - #define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_INT_ASSERTION(test, \ -- KUNIT_ASSERTION, \ -+ KUNIT_EXPECTATION, \ - left, <=, right, \ - fmt, \ - ##__VA_ARGS__) -@@ -1153,7 +1153,7 @@ do { \ - - #define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_INT_ASSERTION(test, \ -- KUNIT_EXPECTATION, \ -+ KUNIT_ASSERTION, \ - left, <, right, \ - fmt, \ - ##__VA_ARGS__) -@@ -1194,7 +1194,7 @@ do { \ - - #define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) 
\ - KUNIT_BINARY_INT_ASSERTION(test, \ -- KUNIT_EXPECTATION, \ -+ KUNIT_ASSERTION, \ - left, >, right, \ - fmt, \ - ##__VA_ARGS__) -diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h -index badcc0e3418f2..262664107b839 100644 ---- a/include/linux/buffer_head.h -+++ b/include/linux/buffer_head.h -@@ -136,6 +136,17 @@ BUFFER_FNS(Defer_Completion, defer_completion) - - static __always_inline void set_buffer_uptodate(struct buffer_head *bh) - { -+ /* -+ * If somebody else already set this uptodate, they will -+ * have done the memory barrier, and a reader will thus -+ * see *some* valid buffer state. -+ * -+ * Any other serialization (with IO errors or whatever that -+ * might clear the bit) has to come from other state (eg BH_Lock). -+ */ -+ if (test_bit(BH_Uptodate, &bh->b_state)) -+ return; -+ - /* - * make it consistent with folio_mark_uptodate - * pairs with smp_load_acquire in buffer_uptodate -diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h -index c869f1e73d755..f60674692d365 100644 ---- a/include/linux/debugfs.h -+++ b/include/linux/debugfs.h -@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name, - void debugfs_remove(struct dentry *dentry); - #define debugfs_remove_recursive debugfs_remove - -+void debugfs_lookup_and_remove(const char *name, struct dentry *parent); -+ - const struct file_operations *debugfs_real_fops(const struct file *filp); - - int debugfs_file_get(struct dentry *dentry); -@@ -225,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry) - static inline void debugfs_remove_recursive(struct dentry *dentry) - { } - -+static inline void debugfs_lookup_and_remove(const char *name, -+ struct dentry *parent) -+{ } -+ - const struct file_operations *debugfs_real_fops(const struct file *filp); - - static inline int debugfs_file_get(struct dentry *dentry) -diff --git a/include/linux/dmar.h b/include/linux/dmar.h -index cbd714a198a0a..f3a3d95df5325 100644 ---- a/include/linux/dmar.h -+++ b/include/linux/dmar.h -@@ -69,6 +69,7 @@ struct dmar_pci_notify_info { - - extern struct rw_semaphore dmar_global_lock; - extern struct list_head dmar_drhd_units; -+extern int intel_iommu_enabled; - - #define for_each_drhd_unit(drhd) \ - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \ -@@ -92,7 +93,8 @@ extern struct list_head dmar_drhd_units; - static inline bool dmar_rcu_check(void) - { - return rwsem_is_locked(&dmar_global_lock) || -- system_state == SYSTEM_BOOTING; -+ system_state == SYSTEM_BOOTING || -+ (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled); - } - - #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) -diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h -index eafa1d2489fda..4e94755098f19 100644 ---- a/include/linux/lsm_hook_defs.h -+++ b/include/linux/lsm_hook_defs.h -@@ -406,4 +406,5 @@ LSM_HOOK(int, 0, perf_event_write, struct perf_event *event) - #ifdef CONFIG_IO_URING - LSM_HOOK(int, 0, uring_override_creds, const struct cred *new) - LSM_HOOK(int, 0, uring_sqpoll, void) -+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd) - #endif /* CONFIG_IO_URING */ -diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h -index 91c8146649f59..b681cfce6190a 100644 ---- a/include/linux/lsm_hooks.h -+++ b/include/linux/lsm_hooks.h -@@ -1575,6 +1575,9 @@ - * Check whether the current task is allowed to spawn a io_uring polling - * thread (IORING_SETUP_SQPOLL). 
- * -+ * @uring_cmd: -+ * Check whether the file_operations uring_cmd is allowed to run. -+ * - */ - union security_list_options { - #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__); -diff --git a/include/linux/security.h b/include/linux/security.h -index 7fc4e9f49f542..3cc127bb5bfd4 100644 ---- a/include/linux/security.h -+++ b/include/linux/security.h -@@ -2051,6 +2051,7 @@ static inline int security_perf_event_write(struct perf_event *event) - #ifdef CONFIG_SECURITY - extern int security_uring_override_creds(const struct cred *new); - extern int security_uring_sqpoll(void); -+extern int security_uring_cmd(struct io_uring_cmd *ioucmd); - #else - static inline int security_uring_override_creds(const struct cred *new) - { -@@ -2060,6 +2061,10 @@ static inline int security_uring_sqpoll(void) - { - return 0; - } -+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd) -+{ -+ return 0; -+} - #endif /* CONFIG_SECURITY */ - #endif /* CONFIG_IO_URING */ - -diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h -index 2f41364a6791e..63d0a21b63162 100644 ---- a/include/linux/skbuff.h -+++ b/include/linux/skbuff.h -@@ -2528,6 +2528,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) - return skb_headlen(skb) + __skb_pagelen(skb); - } - -+static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, -+ int i, struct page *page, -+ int off, int size) -+{ -+ skb_frag_t *frag = &shinfo->frags[i]; -+ -+ /* -+ * Propagate page pfmemalloc to the skb if we can. The problem is -+ * that not all callers have unique ownership of the page but rely -+ * on page_is_pfmemalloc doing the right thing(tm). -+ */ -+ frag->bv_page = page; -+ frag->bv_offset = off; -+ skb_frag_size_set(frag, size); -+} -+ - /** - * __skb_fill_page_desc - initialise a paged fragment in an skb - * @skb: buffer containing fragment to be initialised -@@ -2544,17 +2560,7 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) - static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, - struct page *page, int off, int size) - { -- skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -- -- /* -- * Propagate page pfmemalloc to the skb if we can. The problem is -- * that not all callers have unique ownership of the page but rely -- * on page_is_pfmemalloc doing the right thing(tm). -- */ -- frag->bv_page = page; -- frag->bv_offset = off; -- skb_frag_size_set(frag, size); -- -+ __skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size); - page = compound_head(page); - if (page_is_pfmemalloc(page)) - skb->pfmemalloc = true; -@@ -2581,6 +2587,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i, - skb_shinfo(skb)->nr_frags = i + 1; - } - -+/** -+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb -+ * @skb: buffer containing fragment to be initialised -+ * @i: paged fragment index to initialise -+ * @page: the page to use for this fragment -+ * @off: the offset to the data with @page -+ * @size: the length of the data -+ * -+ * Variant of skb_fill_page_desc() which does not deal with -+ * pfmemalloc, if page is not owned by us. 
-+ */ -+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i, -+ struct page *page, int off, -+ int size) -+{ -+ struct skb_shared_info *shinfo = skb_shinfo(skb); -+ -+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size); -+ shinfo->nr_frags = i + 1; -+} -+ - void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, - int size, unsigned int truesize); - -diff --git a/include/linux/time64.h b/include/linux/time64.h -index 81b9686a20799..2fb8232cff1d5 100644 ---- a/include/linux/time64.h -+++ b/include/linux/time64.h -@@ -20,6 +20,9 @@ struct itimerspec64 { - struct timespec64 it_value; - }; - -+/* Parameters used to convert the timespec values: */ -+#define PSEC_PER_NSEC 1000L -+ - /* Located here for timespec[64]_valid_strict */ - #define TIME64_MAX ((s64)~((u64)1 << 63)) - #define TIME64_MIN (-TIME64_MAX - 1) -diff --git a/include/linux/udp.h b/include/linux/udp.h -index 254a2654400f8..e96da4157d04d 100644 ---- a/include/linux/udp.h -+++ b/include/linux/udp.h -@@ -70,6 +70,7 @@ struct udp_sock { - * For encapsulation sockets. - */ - int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); -+ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset); - int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb); - void (*encap_destroy)(struct sock *sk); - -diff --git a/include/net/bonding.h b/include/net/bonding.h -index cb904d356e31e..3b816ae8b1f3b 100644 ---- a/include/net/bonding.h -+++ b/include/net/bonding.h -@@ -161,8 +161,9 @@ struct slave { - struct net_device *dev; /* first - useful for panic debug */ - struct bonding *bond; /* our master */ - int delay; -- /* all three in jiffies */ -+ /* all 4 in jiffies */ - unsigned long last_link_up; -+ unsigned long last_tx; - unsigned long last_rx; - unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; - s8 link; /* one of BOND_LINK_XXXX */ -@@ -539,6 +540,16 @@ static inline unsigned long slave_last_rx(struct bonding *bond, - return slave->last_rx; - } - -+static inline void slave_update_last_tx(struct slave *slave) -+{ -+ WRITE_ONCE(slave->last_tx, jiffies); -+} -+ -+static inline unsigned long slave_last_tx(struct slave *slave) -+{ -+ return READ_ONCE(slave->last_tx); -+} -+ - #ifdef CONFIG_NET_POLL_CONTROLLER - static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave, - struct sk_buff *skb) -diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h -index afc7ce713657b..72394f441dad8 100644 ---- a/include/net/udp_tunnel.h -+++ b/include/net/udp_tunnel.h -@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net, - typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); - typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk, - struct sk_buff *skb); -+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk, -+ struct sk_buff *skb, -+ unsigned int udp_offset); - typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); - typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk, - struct list_head *head, -@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg { - __u8 encap_type; - udp_tunnel_encap_rcv_t encap_rcv; - udp_tunnel_encap_err_lookup_t encap_err_lookup; -+ udp_tunnel_encap_err_rcv_t encap_err_rcv; - udp_tunnel_encap_destroy_t encap_destroy; - udp_tunnel_gro_receive_t gro_receive; - udp_tunnel_gro_complete_t gro_complete; -diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h -index 9e17247474fa9..6ce3bd22f6c69 100644 ---- a/include/soc/at91/sama7-ddr.h -+++ 
b/include/soc/at91/sama7-ddr.h -@@ -38,6 +38,14 @@ - #define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */ - - #define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */ -+#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */ -+#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */ -+#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */ -+#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */ -+ -+#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */ -+#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */ -+#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */ - - /* UDDRC */ - #define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */ -diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c -index cd155b7e1346d..48833d0edd089 100644 ---- a/io_uring/io_uring.c -+++ b/io_uring/io_uring.c -@@ -4878,6 +4878,10 @@ static int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) - if (!req->file->f_op->uring_cmd) - return -EOPNOTSUPP; - -+ ret = security_uring_cmd(ioucmd); -+ if (ret) -+ return ret; -+ - if (ctx->flags & IORING_SETUP_SQE128) - issue_flags |= IO_URING_F_SQE128; - if (ctx->flags & IORING_SETUP_CQE32) -@@ -8260,6 +8264,7 @@ static void io_queue_async(struct io_kiocb *req, int ret) - - switch (io_arm_poll_handler(req, 0)) { - case IO_APOLL_READY: -+ io_kbuf_recycle(req, 0); - io_req_task_queue(req); - break; - case IO_APOLL_ABORTED: -diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c -index ce95aee05e8ae..e702ca368539a 100644 ---- a/kernel/cgroup/cgroup.c -+++ b/kernel/cgroup/cgroup.c -@@ -2346,6 +2346,47 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) - } - EXPORT_SYMBOL_GPL(task_cgroup_path); - -+/** -+ * cgroup_attach_lock - Lock for ->attach() -+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem -+ * -+ * cgroup migration sometimes needs to stabilize threadgroups against forks and -+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach() -+ * implementations (e.g. cpuset), also need to disable CPU hotplug. -+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can -+ * lead to deadlocks. -+ * -+ * Bringing up a CPU may involve creating and destroying tasks which requires -+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside -+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while -+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up -+ * waiting for an on-going CPU hotplug operation which in turn is waiting for -+ * the threadgroup_rwsem to be released to create new tasks. For more details: -+ * -+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu -+ * -+ * Resolve the situation by always acquiring cpus_read_lock() before optionally -+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that -+ * CPU hotplug is disabled on entry. 
-+ */ -+static void cgroup_attach_lock(bool lock_threadgroup) -+{ -+ cpus_read_lock(); -+ if (lock_threadgroup) -+ percpu_down_write(&cgroup_threadgroup_rwsem); -+} -+ -+/** -+ * cgroup_attach_unlock - Undo cgroup_attach_lock() -+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem -+ */ -+static void cgroup_attach_unlock(bool lock_threadgroup) -+{ -+ if (lock_threadgroup) -+ percpu_up_write(&cgroup_threadgroup_rwsem); -+ cpus_read_unlock(); -+} -+ - /** - * cgroup_migrate_add_task - add a migration target task to a migration context - * @task: target task -@@ -2822,8 +2863,7 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, - } - - struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, -- bool *locked) -- __acquires(&cgroup_threadgroup_rwsem) -+ bool *threadgroup_locked) - { - struct task_struct *tsk; - pid_t pid; -@@ -2840,12 +2880,8 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, - * Therefore, we can skip the global lock. - */ - lockdep_assert_held(&cgroup_mutex); -- if (pid || threadgroup) { -- percpu_down_write(&cgroup_threadgroup_rwsem); -- *locked = true; -- } else { -- *locked = false; -- } -+ *threadgroup_locked = pid || threadgroup; -+ cgroup_attach_lock(*threadgroup_locked); - - rcu_read_lock(); - if (pid) { -@@ -2876,17 +2912,14 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, - goto out_unlock_rcu; - - out_unlock_threadgroup: -- if (*locked) { -- percpu_up_write(&cgroup_threadgroup_rwsem); -- *locked = false; -- } -+ cgroup_attach_unlock(*threadgroup_locked); -+ *threadgroup_locked = false; - out_unlock_rcu: - rcu_read_unlock(); - return tsk; - } - --void cgroup_procs_write_finish(struct task_struct *task, bool locked) -- __releases(&cgroup_threadgroup_rwsem) -+void cgroup_procs_write_finish(struct task_struct *task, bool threadgroup_locked) - { - struct cgroup_subsys *ss; - int ssid; -@@ -2894,8 +2927,8 @@ void cgroup_procs_write_finish(struct task_struct *task, bool locked) - /* release reference from cgroup_procs_write_start() */ - put_task_struct(task); - -- if (locked) -- percpu_up_write(&cgroup_threadgroup_rwsem); -+ cgroup_attach_unlock(threadgroup_locked); -+ - for_each_subsys(ss, ssid) - if (ss->post_attach) - ss->post_attach(); -@@ -2950,12 +2983,11 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) - struct cgroup_subsys_state *d_css; - struct cgroup *dsct; - struct css_set *src_cset; -+ bool has_tasks; - int ret; - - lockdep_assert_held(&cgroup_mutex); - -- percpu_down_write(&cgroup_threadgroup_rwsem); -- - /* look up all csses currently attached to @cgrp's subtree */ - spin_lock_irq(&css_set_lock); - cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { -@@ -2966,6 +2998,15 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) - } - spin_unlock_irq(&css_set_lock); - -+ /* -+ * We need to write-lock threadgroup_rwsem while migrating tasks. -+ * However, if there are no source csets for @cgrp, changing its -+ * controllers isn't gonna produce any task migrations and the -+ * write-locking can be skipped safely. 
-+ */ -+ has_tasks = !list_empty(&mgctx.preloaded_src_csets); -+ cgroup_attach_lock(has_tasks); -+ - /* NULL dst indicates self on default hierarchy */ - ret = cgroup_migrate_prepare_dst(&mgctx); - if (ret) -@@ -2985,7 +3026,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) - ret = cgroup_migrate_execute(&mgctx); - out_finish: - cgroup_migrate_finish(&mgctx); -- percpu_up_write(&cgroup_threadgroup_rwsem); -+ cgroup_attach_unlock(has_tasks); - return ret; - } - -@@ -4933,13 +4974,13 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, - struct task_struct *task; - const struct cred *saved_cred; - ssize_t ret; -- bool locked; -+ bool threadgroup_locked; - - dst_cgrp = cgroup_kn_lock_live(of->kn, false); - if (!dst_cgrp) - return -ENODEV; - -- task = cgroup_procs_write_start(buf, threadgroup, &locked); -+ task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked); - ret = PTR_ERR_OR_ZERO(task); - if (ret) - goto out_unlock; -@@ -4965,7 +5006,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, - ret = cgroup_attach_task(dst_cgrp, task, threadgroup); - - out_finish: -- cgroup_procs_write_finish(task, locked); -+ cgroup_procs_write_finish(task, threadgroup_locked); - out_unlock: - cgroup_kn_unlock(of->kn); - -diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 58aadfda9b8b3..1f3a55297f39d 100644 ---- a/kernel/cgroup/cpuset.c -+++ b/kernel/cgroup/cpuset.c -@@ -2289,7 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) - cgroup_taskset_first(tset, &css); - cs = css_cs(css); - -- cpus_read_lock(); -+ lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ - percpu_down_write(&cpuset_rwsem); - - guarantee_online_mems(cs, &cpuset_attach_nodemask_to); -@@ -2343,7 +2343,6 @@ static void cpuset_attach(struct cgroup_taskset *tset) - wake_up(&cpuset_attach_wq); - - percpu_up_write(&cpuset_rwsem); -- cpus_read_unlock(); - } - - /* The various types of files and directories in a cpuset file system */ -diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c -index 5830dce6081b3..ce34d50f7a9bb 100644 ---- a/kernel/dma/swiotlb.c -+++ b/kernel/dma/swiotlb.c -@@ -464,7 +464,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size - } - } - --#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT)) -+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx) -+{ -+ return start + (idx << IO_TLB_SHIFT); -+} - - /* - * Carefully handle integer overflow which can occur when boundary_mask == ~0UL. 
-diff --git a/kernel/fork.c b/kernel/fork.c -index 9d44f2d46c696..d587c85f35b1e 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -1225,6 +1225,7 @@ void mmput_async(struct mm_struct *mm) - schedule_work(&mm->async_put_work); - } - } -+EXPORT_SYMBOL_GPL(mmput_async); - #endif - - /** -diff --git a/kernel/kprobes.c b/kernel/kprobes.c -index 08350e35aba24..ca9d834d0b843 100644 ---- a/kernel/kprobes.c -+++ b/kernel/kprobes.c -@@ -1562,6 +1562,7 @@ static int check_kprobe_address_safe(struct kprobe *p, - /* Ensure it is not in reserved area nor out of text */ - if (!(core_kernel_text((unsigned long) p->addr) || - is_module_text_address((unsigned long) p->addr)) || -+ in_gate_area_no_mm((unsigned long) p->addr) || - within_kprobe_blacklist((unsigned long) p->addr) || - jump_label_text_reserved(p->addr, p->addr) || - static_call_text_reserved(p->addr, p->addr) || -diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index bb3d63bdf4ae8..667876da8382d 100644 ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -416,7 +416,7 @@ void update_sched_domain_debugfs(void) - char buf[32]; - - snprintf(buf, sizeof(buf), "cpu%d", cpu); -- debugfs_remove(debugfs_lookup(buf, sd_dentry)); -+ debugfs_lookup_and_remove(buf, sd_dentry); - d_cpu = debugfs_create_dir(buf, sd_dentry); - - i = 0; -diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c -index cb866c3141af2..918730d749325 100644 ---- a/kernel/trace/trace_events_trigger.c -+++ b/kernel/trace/trace_events_trigger.c -@@ -142,7 +142,8 @@ static bool check_user_trigger(struct trace_event_file *file) - { - struct event_trigger_data *data; - -- list_for_each_entry_rcu(data, &file->triggers, list) { -+ list_for_each_entry_rcu(data, &file->triggers, list, -+ lockdep_is_held(&event_mutex)) { - if (data->flags & EVENT_TRIGGER_FL_PROBE) - continue; - return true; -diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c -index 95b58bd757ce4..1e130da1b742c 100644 ---- a/kernel/trace/trace_preemptirq.c -+++ b/kernel/trace/trace_preemptirq.c -@@ -95,14 +95,14 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr) - } - - lockdep_hardirqs_on_prepare(); -- lockdep_hardirqs_on(CALLER_ADDR0); -+ lockdep_hardirqs_on(caller_addr); - } - EXPORT_SYMBOL(trace_hardirqs_on_caller); - NOKPROBE_SYMBOL(trace_hardirqs_on_caller); - - __visible void trace_hardirqs_off_caller(unsigned long caller_addr) - { -- lockdep_hardirqs_off(CALLER_ADDR0); -+ lockdep_hardirqs_off(caller_addr); - - if (!this_cpu_read(tracing_irq_cpu)) { - this_cpu_write(tracing_irq_cpu, 1); -diff --git a/mm/kmemleak.c b/mm/kmemleak.c -index a182f5ddaf68b..acd7cbb82e160 100644 ---- a/mm/kmemleak.c -+++ b/mm/kmemleak.c -@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan); - void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, - gfp_t gfp) - { -- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) -+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) - kmemleak_alloc(__va(phys), size, min_count, gfp); - } - EXPORT_SYMBOL(kmemleak_alloc_phys); -@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys); - */ - void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) - { -- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) -+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) - kmemleak_free_part(__va(phys), size); - } - EXPORT_SYMBOL(kmemleak_free_part_phys); -@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys); - */ - void __ref 
kmemleak_not_leak_phys(phys_addr_t phys) - { -- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) -+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) - kmemleak_not_leak(__va(phys)); - } - EXPORT_SYMBOL(kmemleak_not_leak_phys); -@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys); - */ - void __ref kmemleak_ignore_phys(phys_addr_t phys) - { -- if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) -+ if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) - kmemleak_ignore(__va(phys)); - } - EXPORT_SYMBOL(kmemleak_ignore_phys); -diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c -index ff47790366497..f20f4373ff408 100644 ---- a/net/bridge/br_netfilter_hooks.c -+++ b/net/bridge/br_netfilter_hooks.c -@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ - /* - Bridged-and-DNAT'ed traffic doesn't - * require ip_forwarding. */ - if (rt->dst.dev == dev) { -+ skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - goto bridged_dnat; - } -@@ -413,6 +414,7 @@ bridged_dnat: - kfree_skb(skb); - return 0; - } -+ skb_dst_drop(skb); - skb_dst_set_noref(skb, &rt->dst); - } - -diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c -index e4e0c836c3f51..6b07f30675bb0 100644 ---- a/net/bridge/br_netfilter_ipv6.c -+++ b/net/bridge/br_netfilter_ipv6.c -@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc - kfree_skb(skb); - return 0; - } -+ skb_dst_drop(skb); - skb_dst_set_noref(skb, &rt->dst); - } - -diff --git a/net/core/datagram.c b/net/core/datagram.c -index 50f4faeea76cc..48e82438acb02 100644 ---- a/net/core/datagram.c -+++ b/net/core/datagram.c -@@ -675,7 +675,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb, - page_ref_sub(last_head, refs); - refs = 0; - } -- skb_fill_page_desc(skb, frag++, head, start, size); -+ skb_fill_page_desc_noacc(skb, frag++, head, start, size); - } - if (refs) - page_ref_sub(last_head, refs); -diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index bebf58464d667..4b2b07a9422cf 100644 ---- a/net/core/skbuff.c -+++ b/net/core/skbuff.c -@@ -4179,9 +4179,8 @@ normal: - SKB_GSO_CB(nskb)->csum_start = - skb_headroom(nskb) + doffset; - } else { -- skb_copy_bits(head_skb, offset, -- skb_put(nskb, len), -- len); -+ if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) -+ goto err; - } - continue; - } -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index 3d446773ff2a5..ab03977b65781 100644 ---- a/net/ipv4/tcp.c -+++ b/net/ipv4/tcp.c -@@ -1015,7 +1015,7 @@ new_segment: - skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); - } else { - get_page(page); -- skb_fill_page_desc(skb, i, page, offset, copy); -+ skb_fill_page_desc_noacc(skb, i, page, offset, copy); - } - - if (!(flags & MSG_NO_SHARED_FRAGS)) -diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c -index e5435156e545d..c30696eafc361 100644 ---- a/net/ipv4/tcp_input.c -+++ b/net/ipv4/tcp_input.c -@@ -2514,6 +2514,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp) - return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); - } - -+static bool tcp_is_non_sack_preventing_reopen(struct sock *sk) -+{ -+ struct tcp_sock *tp = tcp_sk(sk); -+ -+ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { -+ /* Hold old state until something *above* high_seq -+ * is ACKed. For Reno it is MUST to prevent false -+ * fast retransmits (RFC2582). SACK TCP is safe. 
*/ -+ if (!tcp_any_retrans_done(sk)) -+ tp->retrans_stamp = 0; -+ return true; -+ } -+ return false; -+} -+ - /* People celebrate: "We love our President!" */ - static bool tcp_try_undo_recovery(struct sock *sk) - { -@@ -2536,14 +2551,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) - } else if (tp->rack.reo_wnd_persist) { - tp->rack.reo_wnd_persist--; - } -- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { -- /* Hold old state until something *above* high_seq -- * is ACKed. For Reno it is MUST to prevent false -- * fast retransmits (RFC2582). SACK TCP is safe. */ -- if (!tcp_any_retrans_done(sk)) -- tp->retrans_stamp = 0; -+ if (tcp_is_non_sack_preventing_reopen(sk)) - return true; -- } - tcp_set_ca_state(sk, TCP_CA_Open); - tp->is_sack_reneg = 0; - return false; -@@ -2579,6 +2588,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPSPURIOUSRTOS); - inet_csk(sk)->icsk_retransmits = 0; -+ if (tcp_is_non_sack_preventing_reopen(sk)) -+ return true; - if (frto_undo || tcp_is_sack(tp)) { - tcp_set_ca_state(sk, TCP_CA_Open); - tp->is_sack_reneg = 0; -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c -index aa9f2ec3dc468..01e1d36bdf135 100644 ---- a/net/ipv4/udp.c -+++ b/net/ipv4/udp.c -@@ -781,6 +781,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) - */ - if (tunnel) { - /* ...not for tunnels though: we don't have a sending socket */ -+ if (udp_sk(sk)->encap_err_rcv) -+ udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2); - goto out; - } - if (!inet->recverr) { -diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c -index 8efaf8c3fe2a9..8242c8947340e 100644 ---- a/net/ipv4/udp_tunnel_core.c -+++ b/net/ipv4/udp_tunnel_core.c -@@ -72,6 +72,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, - - udp_sk(sk)->encap_type = cfg->encap_type; - udp_sk(sk)->encap_rcv = cfg->encap_rcv; -+ udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv; - udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup; - udp_sk(sk)->encap_destroy = cfg->encap_destroy; - udp_sk(sk)->gro_receive = cfg->gro_receive; -diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c -index b738eb7e1cae8..04cf06866e765 100644 ---- a/net/ipv6/addrconf.c -+++ b/net/ipv6/addrconf.c -@@ -3557,11 +3557,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, - fallthrough; - case NETDEV_UP: - case NETDEV_CHANGE: -- if (dev->flags & IFF_SLAVE) -+ if (idev && idev->cnf.disable_ipv6) - break; - -- if (idev && idev->cnf.disable_ipv6) -+ if (dev->flags & IFF_SLAVE) { -+ if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) && -+ dev->flags & IFF_UP && dev->flags & IFF_MULTICAST) -+ ipv6_mc_up(idev); - break; -+ } - - if (event == NETDEV_UP) { - /* restore routes for permanent addresses */ -diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c -index 73aaabf0e9665..0b0e34ddc64e0 100644 ---- a/net/ipv6/seg6.c -+++ b/net/ipv6/seg6.c -@@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info) - goto out_unlock; - } - -+ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) { -+ err = -EINVAL; -+ goto out_unlock; -+ } -+ - if (hinfo) { - err = seg6_hmac_info_del(net, hmackeyid); - if (err) -diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c -index e2f2e087a7531..40074bc7274ea 100644 ---- a/net/ipv6/udp.c -+++ b/net/ipv6/udp.c -@@ -616,8 +616,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, - } - - /* Tunnels don't have an application socket: don't pass errors back */ -- 
if (tunnel) -+ if (tunnel) { -+ if (udp_sk(sk)->encap_err_rcv) -+ udp_sk(sk)->encap_err_rcv(sk, skb, offset); - goto out; -+ } - - if (!np->recverr) { - if (!harderr || sk->sk_state != TCP_ESTABLISHED) -diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c -index 1796c456ac98b..992decbcaa5c1 100644 ---- a/net/netfilter/nf_conntrack_irc.c -+++ b/net/netfilter/nf_conntrack_irc.c -@@ -194,8 +194,9 @@ static int help(struct sk_buff *skb, unsigned int protoff, - - /* dcc_ip can be the internal OR external (NAT'ed) IP */ - tuple = &ct->tuplehash[dir].tuple; -- if (tuple->src.u3.ip != dcc_ip && -- tuple->dst.u3.ip != dcc_ip) { -+ if ((tuple->src.u3.ip != dcc_ip && -+ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) || -+ dcc_port == 0) { - net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n", - &tuple->src.u3.ip, - &dcc_ip, dcc_port); -diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c -index a63b51dceaf2c..a634c72b1ffcf 100644 ---- a/net/netfilter/nf_conntrack_proto_tcp.c -+++ b/net/netfilter/nf_conntrack_proto_tcp.c -@@ -655,6 +655,37 @@ static bool tcp_in_window(struct nf_conn *ct, - tn->tcp_be_liberal) - res = true; - if (!res) { -+ bool seq_ok = before(seq, sender->td_maxend + 1); -+ -+ if (!seq_ok) { -+ u32 overshot = end - sender->td_maxend + 1; -+ bool ack_ok; -+ -+ ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1); -+ -+ if (in_recv_win && -+ ack_ok && -+ overshot <= receiver->td_maxwin && -+ before(sack, receiver->td_end + 1)) { -+ /* Work around TCPs that send more bytes than allowed by -+ * the receive window. -+ * -+ * If the (marked as invalid) packet is allowed to pass by -+ * the ruleset and the peer acks this data, then its possible -+ * all future packets will trigger 'ACK is over upper bound' check. -+ * -+ * Thus if only the sequence check fails then do update td_end so -+ * possible ACK for this data can update internal state. -+ */ -+ sender->td_end = end; -+ sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; -+ -+ nf_ct_l4proto_log_invalid(skb, ct, hook_state, -+ "%u bytes more than expected", overshot); -+ return res; -+ } -+ } -+ - nf_ct_l4proto_log_invalid(skb, ct, hook_state, - "%s", - before(seq, sender->td_maxend + 1) ? 
-diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c -index bc690238a3c56..848cc81d69926 100644 ---- a/net/netfilter/nf_tables_api.c -+++ b/net/netfilter/nf_tables_api.c -@@ -2166,8 +2166,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, - chain->flags |= NFT_CHAIN_BASE | flags; - basechain->policy = NF_ACCEPT; - if (chain->flags & NFT_CHAIN_HW_OFFLOAD && -- !nft_chain_offload_support(basechain)) -+ !nft_chain_offload_support(basechain)) { -+ list_splice_init(&basechain->hook_list, &hook->list); - return -EOPNOTSUPP; -+ } - - flow_block_init(&basechain->flow_block); - -diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h -index 571436064cd6f..62c70709d7980 100644 ---- a/net/rxrpc/ar-internal.h -+++ b/net/rxrpc/ar-internal.h -@@ -982,6 +982,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *); - /* - * peer_event.c - */ -+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset); - void rxrpc_error_report(struct sock *); - void rxrpc_peer_keepalive_worker(struct work_struct *); - -diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c -index 96ecb7356c0fe..79bb02eb67b2b 100644 ---- a/net/rxrpc/local_object.c -+++ b/net/rxrpc/local_object.c -@@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) - - tuncfg.encap_type = UDP_ENCAP_RXRPC; - tuncfg.encap_rcv = rxrpc_input_packet; -+ tuncfg.encap_err_rcv = rxrpc_encap_err_rcv; - tuncfg.sk_user_data = local; - setup_udp_tunnel_sock(net, local->socket, &tuncfg); - -diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c -index be032850ae8ca..32561e9567fe3 100644 ---- a/net/rxrpc/peer_event.c -+++ b/net/rxrpc/peer_event.c -@@ -16,22 +16,105 @@ - #include - #include - #include -+#include - #include "ar-internal.h" - -+static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int); - static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *); - static void rxrpc_distribute_error(struct rxrpc_peer *, int, - enum rxrpc_call_completion); - - /* -- * Find the peer associated with an ICMP packet. -+ * Find the peer associated with an ICMPv4 packet. - */ - static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, -- const struct sk_buff *skb, -+ struct sk_buff *skb, -+ unsigned int udp_offset, -+ unsigned int *info, - struct sockaddr_rxrpc *srx) - { -- struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); -+ struct iphdr *ip, *ip0 = ip_hdr(skb); -+ struct icmphdr *icmp = icmp_hdr(skb); -+ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset); - -- _enter(""); -+ _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code); -+ -+ switch (icmp->type) { -+ case ICMP_DEST_UNREACH: -+ *info = ntohs(icmp->un.frag.mtu); -+ fallthrough; -+ case ICMP_TIME_EXCEEDED: -+ case ICMP_PARAMETERPROB: -+ ip = (struct iphdr *)((void *)icmp + 8); -+ break; -+ default: -+ return NULL; -+ } -+ -+ memset(srx, 0, sizeof(*srx)); -+ srx->transport_type = local->srx.transport_type; -+ srx->transport_len = local->srx.transport_len; -+ srx->transport.family = local->srx.transport.family; -+ -+ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice -+ * versa? 
-+ */ -+ switch (srx->transport.family) { -+ case AF_INET: -+ srx->transport_len = sizeof(srx->transport.sin); -+ srx->transport.family = AF_INET; -+ srx->transport.sin.sin_port = udp->dest; -+ memcpy(&srx->transport.sin.sin_addr, &ip->daddr, -+ sizeof(struct in_addr)); -+ break; -+ -+#ifdef CONFIG_AF_RXRPC_IPV6 -+ case AF_INET6: -+ srx->transport_len = sizeof(srx->transport.sin); -+ srx->transport.family = AF_INET; -+ srx->transport.sin.sin_port = udp->dest; -+ memcpy(&srx->transport.sin.sin_addr, &ip->daddr, -+ sizeof(struct in_addr)); -+ break; -+#endif -+ -+ default: -+ WARN_ON_ONCE(1); -+ return NULL; -+ } -+ -+ _net("ICMP {%pISp}", &srx->transport); -+ return rxrpc_lookup_peer_rcu(local, srx); -+} -+ -+#ifdef CONFIG_AF_RXRPC_IPV6 -+/* -+ * Find the peer associated with an ICMPv6 packet. -+ */ -+static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local, -+ struct sk_buff *skb, -+ unsigned int udp_offset, -+ unsigned int *info, -+ struct sockaddr_rxrpc *srx) -+{ -+ struct icmp6hdr *icmp = icmp6_hdr(skb); -+ struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb); -+ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset); -+ -+ _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code); -+ -+ switch (icmp->icmp6_type) { -+ case ICMPV6_DEST_UNREACH: -+ *info = ntohl(icmp->icmp6_mtu); -+ fallthrough; -+ case ICMPV6_PKT_TOOBIG: -+ case ICMPV6_TIME_EXCEED: -+ case ICMPV6_PARAMPROB: -+ ip = (struct ipv6hdr *)((void *)icmp + 8); -+ break; -+ default: -+ return NULL; -+ } - - memset(srx, 0, sizeof(*srx)); - srx->transport_type = local->srx.transport_type; -@@ -41,6 +124,165 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, - /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice - * versa? - */ -+ switch (srx->transport.family) { -+ case AF_INET: -+ _net("Rx ICMP6 on v4 sock"); -+ srx->transport_len = sizeof(srx->transport.sin); -+ srx->transport.family = AF_INET; -+ srx->transport.sin.sin_port = udp->dest; -+ memcpy(&srx->transport.sin.sin_addr, -+ &ip->daddr.s6_addr32[3], sizeof(struct in_addr)); -+ break; -+ case AF_INET6: -+ _net("Rx ICMP6"); -+ srx->transport.sin.sin_port = udp->dest; -+ memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr, -+ sizeof(struct in6_addr)); -+ break; -+ default: -+ WARN_ON_ONCE(1); -+ return NULL; -+ } -+ -+ _net("ICMP {%pISp}", &srx->transport); -+ return rxrpc_lookup_peer_rcu(local, srx); -+} -+#endif /* CONFIG_AF_RXRPC_IPV6 */ -+ -+/* -+ * Handle an error received on the local endpoint as a tunnel. 
-+ */ -+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, -+ unsigned int udp_offset) -+{ -+ struct sock_extended_err ee; -+ struct sockaddr_rxrpc srx; -+ struct rxrpc_local *local; -+ struct rxrpc_peer *peer; -+ unsigned int info = 0; -+ int err; -+ u8 version = ip_hdr(skb)->version; -+ u8 type = icmp_hdr(skb)->type; -+ u8 code = icmp_hdr(skb)->code; -+ -+ rcu_read_lock(); -+ local = rcu_dereference_sk_user_data(sk); -+ if (unlikely(!local)) { -+ rcu_read_unlock(); -+ return; -+ } -+ -+ rxrpc_new_skb(skb, rxrpc_skb_received); -+ -+ switch (ip_hdr(skb)->version) { -+ case IPVERSION: -+ peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset, -+ &info, &srx); -+ break; -+#ifdef CONFIG_AF_RXRPC_IPV6 -+ case 6: -+ peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset, -+ &info, &srx); -+ break; -+#endif -+ default: -+ rcu_read_unlock(); -+ return; -+ } -+ -+ if (peer && !rxrpc_get_peer_maybe(peer)) -+ peer = NULL; -+ if (!peer) { -+ rcu_read_unlock(); -+ return; -+ } -+ -+ memset(&ee, 0, sizeof(ee)); -+ -+ switch (version) { -+ case IPVERSION: -+ switch (type) { -+ case ICMP_DEST_UNREACH: -+ switch (code) { -+ case ICMP_FRAG_NEEDED: -+ rxrpc_adjust_mtu(peer, info); -+ rcu_read_unlock(); -+ rxrpc_put_peer(peer); -+ return; -+ default: -+ break; -+ } -+ -+ err = EHOSTUNREACH; -+ if (code <= NR_ICMP_UNREACH) { -+ /* Might want to do something different with -+ * non-fatal errors -+ */ -+ //harderr = icmp_err_convert[code].fatal; -+ err = icmp_err_convert[code].errno; -+ } -+ break; -+ -+ case ICMP_TIME_EXCEEDED: -+ err = EHOSTUNREACH; -+ break; -+ default: -+ err = EPROTO; -+ break; -+ } -+ -+ ee.ee_origin = SO_EE_ORIGIN_ICMP; -+ ee.ee_type = type; -+ ee.ee_code = code; -+ ee.ee_errno = err; -+ break; -+ -+#ifdef CONFIG_AF_RXRPC_IPV6 -+ case 6: -+ switch (type) { -+ case ICMPV6_PKT_TOOBIG: -+ rxrpc_adjust_mtu(peer, info); -+ rcu_read_unlock(); -+ rxrpc_put_peer(peer); -+ return; -+ } -+ -+ icmpv6_err_convert(type, code, &err); -+ -+ if (err == EACCES) -+ err = EHOSTUNREACH; -+ -+ ee.ee_origin = SO_EE_ORIGIN_ICMP6; -+ ee.ee_type = type; -+ ee.ee_code = code; -+ ee.ee_errno = err; -+ break; -+#endif -+ } -+ -+ trace_rxrpc_rx_icmp(peer, &ee, &srx); -+ -+ rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR); -+ rcu_read_unlock(); -+ rxrpc_put_peer(peer); -+} -+ -+/* -+ * Find the peer associated with a local error. -+ */ -+static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local, -+ const struct sk_buff *skb, -+ struct sockaddr_rxrpc *srx) -+{ -+ struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); -+ -+ _enter(""); -+ -+ memset(srx, 0, sizeof(*srx)); -+ srx->transport_type = local->srx.transport_type; -+ srx->transport_len = local->srx.transport_len; -+ srx->transport.family = local->srx.transport.family; -+ - switch (srx->transport.family) { - case AF_INET: - srx->transport_len = sizeof(srx->transport.sin); -@@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local, - /* - * Handle an MTU/fragmentation problem. 
- */ --static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr) -+static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu) - { -- u32 mtu = serr->ee.ee_info; -- - _net("Rx ICMP Fragmentation Needed (%d)", mtu); - - /* wind down the local interface MTU */ -@@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk) - struct sock_exterr_skb *serr; - struct sockaddr_rxrpc srx; - struct rxrpc_local *local; -- struct rxrpc_peer *peer; -+ struct rxrpc_peer *peer = NULL; - struct sk_buff *skb; - - rcu_read_lock(); -@@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk) - } - rxrpc_new_skb(skb, rxrpc_skb_received); - serr = SKB_EXT_ERR(skb); -- if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { -- _leave("UDP empty message"); -- rcu_read_unlock(); -- rxrpc_free_skb(skb, rxrpc_skb_freed); -- return; -- } - -- peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx); -- if (peer && !rxrpc_get_peer_maybe(peer)) -- peer = NULL; -- if (!peer) { -- rcu_read_unlock(); -- rxrpc_free_skb(skb, rxrpc_skb_freed); -- _leave(" [no peer]"); -- return; -- } -- -- trace_rxrpc_rx_icmp(peer, &serr->ee, &srx); -- -- if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP && -- serr->ee.ee_type == ICMP_DEST_UNREACH && -- serr->ee.ee_code == ICMP_FRAG_NEEDED)) { -- rxrpc_adjust_mtu(peer, serr); -- rcu_read_unlock(); -- rxrpc_free_skb(skb, rxrpc_skb_freed); -- rxrpc_put_peer(peer); -- _leave(" [MTU update]"); -- return; -+ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) { -+ peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx); -+ if (peer && !rxrpc_get_peer_maybe(peer)) -+ peer = NULL; -+ if (peer) { -+ trace_rxrpc_rx_icmp(peer, &serr->ee, &srx); -+ rxrpc_store_error(peer, serr); -+ } - } - -- rxrpc_store_error(peer, serr); - rcu_read_unlock(); - rxrpc_free_skb(skb, rxrpc_skb_freed); - rxrpc_put_peer(peer); -- - _leave(""); - } - -diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c -index 08aab5c01437d..db47844f4ac99 100644 ---- a/net/rxrpc/rxkad.c -+++ b/net/rxrpc/rxkad.c -@@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, - * directly into the target buffer. 
- */ - sg = _sg; -- nsg = skb_shinfo(skb)->nr_frags; -+ nsg = skb_shinfo(skb)->nr_frags + 1; - if (nsg <= 4) { - nsg = 4; - } else { -diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c -index 3d061a13d7ed2..2829455211f8c 100644 ---- a/net/sched/sch_sfb.c -+++ b/net/sched/sch_sfb.c -@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) - } - } - --static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) -+static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) - { - u32 sfbhash; - -- sfbhash = sfb_hash(skb, 0); -+ sfbhash = cb->hashes[0]; - if (sfbhash) - increment_one_qlen(sfbhash, 0, q); - -- sfbhash = sfb_hash(skb, 1); -+ sfbhash = cb->hashes[1]; - if (sfbhash) - increment_one_qlen(sfbhash, 1, q); - } -@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, - { - - struct sfb_sched_data *q = qdisc_priv(sch); -+ unsigned int len = qdisc_pkt_len(skb); - struct Qdisc *child = q->qdisc; - struct tcf_proto *fl; -+ struct sfb_skb_cb cb; - int i; - u32 p_min = ~0; - u32 minqlen = ~0; -@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, - } - - enqueue: -+ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); - ret = qdisc_enqueue(skb, child, to_free); - if (likely(ret == NET_XMIT_SUCCESS)) { -- qdisc_qstats_backlog_inc(sch, skb); -+ sch->qstats.backlog += len; - sch->q.qlen++; -- increment_qlen(skb, q); -+ increment_qlen(&cb, q); - } else if (net_xmit_drop_count(ret)) { - q->stats.childdrop++; - qdisc_qstats_drop(sch); -diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c -index b9c71a304d399..0b941dd63d268 100644 ---- a/net/sched/sch_taprio.c -+++ b/net/sched/sch_taprio.c -@@ -18,6 +18,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -176,7 +177,7 @@ static ktime_t get_interval_end_time(struct sched_gate_list *sched, - - static int length_to_duration(struct taprio_sched *q, int len) - { -- return div_u64(len * atomic64_read(&q->picos_per_byte), 1000); -+ return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC); - } - - /* Returns the entry corresponding to next available interval. 
If -@@ -551,7 +552,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch) - static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) - { - atomic_set(&entry->budget, -- div64_u64((u64)entry->interval * 1000, -+ div64_u64((u64)entry->interval * PSEC_PER_NSEC, - atomic64_read(&q->picos_per_byte))); - } - -diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c -index f40f6ed0fbdb4..1f3bb1f6b1f7b 100644 ---- a/net/smc/smc_core.c -+++ b/net/smc/smc_core.c -@@ -755,6 +755,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, - lnk->lgr = lgr; - smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */ - lnk->link_idx = link_idx; -+ lnk->wr_rx_id_compl = 0; - smc_ibdev_cnt_inc(lnk); - smcr_copy_dev_info_to_link(lnk); - atomic_set(&lnk->conn_cnt, 0); -diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h -index 4cb03e9423648..7b43a78c7f73a 100644 ---- a/net/smc/smc_core.h -+++ b/net/smc/smc_core.h -@@ -115,8 +115,10 @@ struct smc_link { - dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ - dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/ - u64 wr_rx_id; /* seq # of last recv WR */ -+ u64 wr_rx_id_compl; /* seq # of last completed WR */ - u32 wr_rx_cnt; /* number of WR recv buffers */ - unsigned long wr_rx_tstamp; /* jiffies when last buf rx */ -+ wait_queue_head_t wr_rx_empty_wait; /* wait for RQ empty */ - - struct ib_reg_wr wr_reg; /* WR register memory region */ - wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ -diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c -index 26f8f240d9e84..b0678a417e09d 100644 ---- a/net/smc/smc_wr.c -+++ b/net/smc/smc_wr.c -@@ -454,6 +454,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) - - for (i = 0; i < num; i++) { - link = wc[i].qp->qp_context; -+ link->wr_rx_id_compl = wc[i].wr_id; - if (wc[i].status == IB_WC_SUCCESS) { - link->wr_rx_tstamp = jiffies; - smc_wr_rx_demultiplex(&wc[i]); -@@ -465,6 +466,8 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num) - case IB_WC_RNR_RETRY_EXC_ERR: - case IB_WC_WR_FLUSH_ERR: - smcr_link_down_cond_sched(link); -+ if (link->wr_rx_id_compl == link->wr_rx_id) -+ wake_up(&link->wr_rx_empty_wait); - break; - default: - smc_wr_rx_post(link); /* refill WR RX */ -@@ -639,6 +642,7 @@ void smc_wr_free_link(struct smc_link *lnk) - return; - ibdev = lnk->smcibdev->ibdev; - -+ smc_wr_drain_cq(lnk); - smc_wr_wakeup_reg_wait(lnk); - smc_wr_wakeup_tx_wait(lnk); - -@@ -889,6 +893,7 @@ int smc_wr_create_link(struct smc_link *lnk) - atomic_set(&lnk->wr_tx_refcnt, 0); - init_waitqueue_head(&lnk->wr_reg_wait); - atomic_set(&lnk->wr_reg_refcnt, 0); -+ init_waitqueue_head(&lnk->wr_rx_empty_wait); - return rc; - - dma_unmap: -diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h -index a54e90a1110fd..45e9b894d3f8a 100644 ---- a/net/smc/smc_wr.h -+++ b/net/smc/smc_wr.h -@@ -73,6 +73,11 @@ static inline void smc_wr_tx_link_put(struct smc_link *link) - wake_up_all(&link->wr_tx_wait); - } - -+static inline void smc_wr_drain_cq(struct smc_link *lnk) -+{ -+ wait_event(lnk->wr_rx_empty_wait, lnk->wr_rx_id_compl == lnk->wr_rx_id); -+} -+ - static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk) - { - wake_up_all(&lnk->wr_tx_wait); -diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c -index 2f4d23238a7e3..9618e4429f0fe 100644 ---- a/net/tipc/monitor.c -+++ b/net/tipc/monitor.c -@@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v) - - static int map_get(u64 up_map, int i) - { -- return (up_map & (1 << i)) >> i; -+ 
return (up_map & (1ULL << i)) >> i; - } - - static struct tipc_peer *peer_prev(struct tipc_peer *peer) -diff --git a/security/security.c b/security/security.c -index 188b8f7822206..8b62654ff3f97 100644 ---- a/security/security.c -+++ b/security/security.c -@@ -2654,4 +2654,8 @@ int security_uring_sqpoll(void) - { - return call_int_hook(uring_sqpoll, 0); - } -+int security_uring_cmd(struct io_uring_cmd *ioucmd) -+{ -+ return call_int_hook(uring_cmd, 0, ioucmd); -+} - #endif /* CONFIG_IO_URING */ -diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c -index 1bbd53321d133..e90dfa36f79aa 100644 ---- a/security/selinux/hooks.c -+++ b/security/selinux/hooks.c -@@ -91,6 +91,7 @@ - #include - #include - #include -+#include - - #include "avc.h" - #include "objsec.h" -@@ -6990,6 +6991,28 @@ static int selinux_uring_sqpoll(void) - return avc_has_perm(&selinux_state, sid, sid, - SECCLASS_IO_URING, IO_URING__SQPOLL, NULL); - } -+ -+/** -+ * selinux_uring_cmd - check if IORING_OP_URING_CMD is allowed -+ * @ioucmd: the io_uring command structure -+ * -+ * Check to see if the current domain is allowed to execute an -+ * IORING_OP_URING_CMD against the device/file specified in @ioucmd. -+ * -+ */ -+static int selinux_uring_cmd(struct io_uring_cmd *ioucmd) -+{ -+ struct file *file = ioucmd->file; -+ struct inode *inode = file_inode(file); -+ struct inode_security_struct *isec = selinux_inode(inode); -+ struct common_audit_data ad; -+ -+ ad.type = LSM_AUDIT_DATA_FILE; -+ ad.u.file = file; -+ -+ return avc_has_perm(&selinux_state, current_sid(), isec->sid, -+ SECCLASS_IO_URING, IO_URING__CMD, &ad); -+} - #endif /* CONFIG_IO_URING */ - - /* -@@ -7234,6 +7257,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { - #ifdef CONFIG_IO_URING - LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds), - LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll), -+ LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd), - #endif - - /* -diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h -index ff757ae5f2537..1c2f41ff4e551 100644 ---- a/security/selinux/include/classmap.h -+++ b/security/selinux/include/classmap.h -@@ -253,7 +253,7 @@ const struct security_class_mapping secclass_map[] = { - { "anon_inode", - { COMMON_FILE_PERMS, NULL } }, - { "io_uring", -- { "override_creds", "sqpoll", NULL } }, -+ { "override_creds", "sqpoll", "cmd", NULL } }, - { NULL } - }; - -diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c -index 6207762dbdb13..b30e20f64471c 100644 ---- a/security/smack/smack_lsm.c -+++ b/security/smack/smack_lsm.c -@@ -42,6 +42,7 @@ - #include - #include - #include -+#include - #include "smack.h" - - #define TRANS_TRUE "TRUE" -@@ -4739,6 +4740,36 @@ static int smack_uring_sqpoll(void) - return -EPERM; - } - -+/** -+ * smack_uring_cmd - check on file operations for io_uring -+ * @ioucmd: the command in question -+ * -+ * Make a best guess about whether a io_uring "command" should -+ * be allowed. Use the same logic used for determining if the -+ * file could be opened for read in the absence of better criteria. 
-+ */ -+static int smack_uring_cmd(struct io_uring_cmd *ioucmd) -+{ -+ struct file *file = ioucmd->file; -+ struct smk_audit_info ad; -+ struct task_smack *tsp; -+ struct inode *inode; -+ int rc; -+ -+ if (!file) -+ return -EINVAL; -+ -+ tsp = smack_cred(file->f_cred); -+ inode = file_inode(file); -+ -+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); -+ smk_ad_setfield_u_fs_path(&ad, file->f_path); -+ rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad); -+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc); -+ -+ return rc; -+} -+ - #endif /* CONFIG_IO_URING */ - - struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = { -@@ -4896,6 +4927,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = { - #ifdef CONFIG_IO_URING - LSM_HOOK_INIT(uring_override_creds, smack_uring_override_creds), - LSM_HOOK_INIT(uring_sqpoll, smack_uring_sqpoll), -+ LSM_HOOK_INIT(uring_cmd, smack_uring_cmd), - #endif - }; - -diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c -index 55b3c49ba61de..244afc38ddcaa 100644 ---- a/sound/core/memalloc.c -+++ b/sound/core/memalloc.c -@@ -535,10 +535,13 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size) - dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, - sg_dma_address(sgt->sgl)); - p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); -- if (p) -+ if (p) { - dmab->private_data = sgt; -- else -+ /* store the first page address for convenience */ -+ dmab->addr = snd_sgbuf_get_addr(dmab, 0); -+ } else { - dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); -+ } - return p; - } - -@@ -772,6 +775,8 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size) - if (!p) - goto error; - dmab->private_data = sgbuf; -+ /* store the first page address for convenience */ -+ dmab->addr = snd_sgbuf_get_addr(dmab, 0); - return p; - - error: -diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c -index 90c3a367d7de9..02df915eb3c66 100644 ---- a/sound/core/oss/pcm_oss.c -+++ b/sound/core/oss/pcm_oss.c -@@ -1672,14 +1672,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file) - runtime = substream->runtime; - if (atomic_read(&substream->mmap_count)) - goto __direct; -- err = snd_pcm_oss_make_ready(substream); -- if (err < 0) -- return err; - atomic_inc(&runtime->oss.rw_ref); - if (mutex_lock_interruptible(&runtime->oss.params_lock)) { - atomic_dec(&runtime->oss.rw_ref); - return -ERESTARTSYS; - } -+ err = snd_pcm_oss_make_ready_locked(substream); -+ if (err < 0) -+ goto unlock; - format = snd_pcm_oss_format_from(runtime->oss.format); - width = snd_pcm_format_physical_width(format); - if (runtime->oss.buffer_used > 0) { -diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c -index 9b4a7cdb103ad..12f12a294df5a 100644 ---- a/sound/drivers/aloop.c -+++ b/sound/drivers/aloop.c -@@ -605,17 +605,18 @@ static unsigned int loopback_jiffies_timer_pos_update - cable->streams[SNDRV_PCM_STREAM_PLAYBACK]; - struct loopback_pcm *dpcm_capt = - cable->streams[SNDRV_PCM_STREAM_CAPTURE]; -- unsigned long delta_play = 0, delta_capt = 0; -+ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies; - unsigned int running, count1, count2; - -+ cur_jiffies = jiffies; - running = cable->running ^ cable->pause; - if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) { -- delta_play = jiffies - dpcm_play->last_jiffies; -+ delta_play = cur_jiffies - dpcm_play->last_jiffies; - dpcm_play->last_jiffies += delta_play; - } - - if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) { -- delta_capt 
= jiffies - dpcm_capt->last_jiffies; -+ delta_capt = cur_jiffies - dpcm_capt->last_jiffies; - dpcm_capt->last_jiffies += delta_capt; - } - -diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c -index b2701a4452d86..48af77ae8020f 100644 ---- a/sound/pci/emu10k1/emupcm.c -+++ b/sound/pci/emu10k1/emupcm.c -@@ -124,7 +124,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic - epcm->voices[0]->epcm = epcm; - if (voices > 1) { - for (i = 1; i < voices; i++) { -- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i]; -+ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G]; - epcm->voices[i]->epcm = epcm; - } - } -diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index a77165bd92a98..b20694fd69dea 100644 ---- a/sound/pci/hda/hda_intel.c -+++ b/sound/pci/hda/hda_intel.c -@@ -1817,7 +1817,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, - - /* use the non-cached pages in non-snoop mode */ - if (!azx_snoop(chip)) -- azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC; -+ azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC_SG; - - if (chip->driver_type == AZX_DRIVER_NVIDIA) { - dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n"); -diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c -index d243800464352..bcca1cf3cd7b6 100644 ---- a/sound/soc/atmel/mchp-spdiftx.c -+++ b/sound/soc/atmel/mchp-spdiftx.c -@@ -196,8 +196,7 @@ struct mchp_spdiftx_dev { - struct clk *pclk; - struct clk *gclk; - unsigned int fmt; -- const struct mchp_i2s_caps *caps; -- int gclk_enabled:1; -+ unsigned int gclk_enabled:1; - }; - - static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev) -@@ -766,8 +765,6 @@ static const struct of_device_id mchp_spdiftx_dt_ids[] = { - MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids); - static int mchp_spdiftx_probe(struct platform_device *pdev) - { -- struct device_node *np = pdev->dev.of_node; -- const struct of_device_id *match; - struct mchp_spdiftx_dev *dev; - struct resource *mem; - struct regmap *regmap; -@@ -781,11 +778,6 @@ static int mchp_spdiftx_probe(struct platform_device *pdev) - if (!dev) - return -ENOMEM; - -- /* Get hardware capabilities. */ -- match = of_match_node(mchp_spdiftx_dt_ids, np); -- if (match) -- dev->caps = match->data; -- - /* Map I/O registers. 
*/ - base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); - if (IS_ERR(base)) -diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c -index 4fade23887972..8cba3015398b7 100644 ---- a/sound/soc/codecs/cs42l42.c -+++ b/sound/soc/codecs/cs42l42.c -@@ -1618,7 +1618,6 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data) - unsigned int current_plug_status; - unsigned int current_button_status; - unsigned int i; -- int report = 0; - - mutex_lock(&cs42l42->irq_lock); - if (cs42l42->suspended) { -@@ -1713,13 +1712,15 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data) - - if (current_button_status & CS42L42_M_DETECT_TF_MASK) { - dev_dbg(cs42l42->dev, "Button released\n"); -- report = 0; -+ snd_soc_jack_report(cs42l42->jack, 0, -+ SND_JACK_BTN_0 | SND_JACK_BTN_1 | -+ SND_JACK_BTN_2 | SND_JACK_BTN_3); - } else if (current_button_status & CS42L42_M_DETECT_FT_MASK) { -- report = cs42l42_handle_button_press(cs42l42); -- -+ snd_soc_jack_report(cs42l42->jack, -+ cs42l42_handle_button_press(cs42l42), -+ SND_JACK_BTN_0 | SND_JACK_BTN_1 | -+ SND_JACK_BTN_2 | SND_JACK_BTN_3); - } -- snd_soc_jack_report(cs42l42->jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 | -- SND_JACK_BTN_2 | SND_JACK_BTN_3); - } - } - -diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c -index 6e1184c8b672a..c48ac107810d4 100644 ---- a/sound/soc/qcom/sm8250.c -+++ b/sound/soc/qcom/sm8250.c -@@ -270,6 +270,7 @@ static int sm8250_platform_probe(struct platform_device *pdev) - if (!card) - return -ENOMEM; - -+ card->owner = THIS_MODULE; - /* Allocate the private data */ - data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); - if (!data) -diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig -index 4542868cd730f..39216c09f1597 100644 ---- a/sound/soc/sof/Kconfig -+++ b/sound/soc/sof/Kconfig -@@ -196,6 +196,7 @@ config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE - - config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST - tristate "SOF enable IPC flood test" -+ depends on SND_SOC_SOF - select SND_SOC_SOF_CLIENT - help - This option enables a separate client device for IPC flood test -@@ -214,6 +215,7 @@ config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM - - config SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR - tristate "SOF enable IPC message injector" -+ depends on SND_SOC_SOF - select SND_SOC_SOF_CLIENT - help - This option enables the IPC message injector which can be used to send -diff --git a/sound/usb/card.c b/sound/usb/card.c -index d356743de2ff9..706d249a9ad6b 100644 ---- a/sound/usb/card.c -+++ b/sound/usb/card.c -@@ -699,7 +699,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface) - if (delayed_register[i] && - sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 && - id == chip->usb_id) -- return inum != iface; -+ return iface < inum; - } - - return false; -diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c -index f9c921683948d..ff2aa13b7b26f 100644 ---- a/sound/usb/endpoint.c -+++ b/sound/usb/endpoint.c -@@ -758,7 +758,8 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip, - * The endpoint needs to be closed via snd_usb_endpoint_close() later. - * - * Note that this function doesn't configure the endpoint. The substream -- * needs to set it up later via snd_usb_endpoint_configure(). -+ * needs to set it up later via snd_usb_endpoint_set_params() and -+ * snd_usb_endpoint_prepare(). 
- */ - struct snd_usb_endpoint * - snd_usb_endpoint_open(struct snd_usb_audio *chip, -@@ -924,6 +925,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip, - endpoint_set_interface(chip, ep, false); - - if (!--ep->opened) { -+ if (ep->clock_ref && !atomic_read(&ep->clock_ref->locked)) -+ ep->clock_ref->rate = 0; - ep->iface = 0; - ep->altsetting = 0; - ep->cur_audiofmt = NULL; -@@ -1290,12 +1293,13 @@ out_of_memory: - /* - * snd_usb_endpoint_set_params: configure an snd_usb_endpoint - * -+ * It's called either from hw_params callback. - * Determine the number of URBs to be used on this endpoint. - * An endpoint must be configured before it can be started. - * An endpoint that is already running can not be reconfigured. - */ --static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep) -+int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep) - { - const struct audioformat *fmt = ep->cur_audiofmt; - int err; -@@ -1378,18 +1382,18 @@ static int init_sample_rate(struct snd_usb_audio *chip, - } - - /* -- * snd_usb_endpoint_configure: Configure the endpoint -+ * snd_usb_endpoint_prepare: Prepare the endpoint - * - * This function sets up the EP to be fully usable state. -- * It's called either from hw_params or prepare callback. -+ * It's called either from prepare callback. - * The function checks need_setup flag, and performs nothing unless needed, - * so it's safe to call this multiple times. - * - * This returns zero if unchanged, 1 if the configuration has changed, - * or a negative error code. - */ --int snd_usb_endpoint_configure(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep) -+int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep) - { - bool iface_first; - int err = 0; -@@ -1410,9 +1414,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip, - if (err < 0) - goto unlock; - } -- err = snd_usb_endpoint_set_params(chip, ep); -- if (err < 0) -- goto unlock; - goto done; - } - -@@ -1440,10 +1441,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip, - if (err < 0) - goto unlock; - -- err = snd_usb_endpoint_set_params(chip, ep); -- if (err < 0) -- goto unlock; -- - err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt); - if (err < 0) - goto unlock; -diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h -index 6a9af04cf175a..e67ea28faa54f 100644 ---- a/sound/usb/endpoint.h -+++ b/sound/usb/endpoint.h -@@ -17,8 +17,10 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip, - bool is_sync_ep); - void snd_usb_endpoint_close(struct snd_usb_audio *chip, - struct snd_usb_endpoint *ep); --int snd_usb_endpoint_configure(struct snd_usb_audio *chip, -- struct snd_usb_endpoint *ep); -+int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep); -+int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, -+ struct snd_usb_endpoint *ep); - int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock); - - bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip, -diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c -index e692ae04436a5..02035b545f9dd 100644 ---- a/sound/usb/pcm.c -+++ b/sound/usb/pcm.c -@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip, - if (stop_endpoints(subs, false)) - sync_pending_stops(subs); - if (subs->sync_endpoint) { -- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint); -+ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint); - if (err < 0) - return 
err; - } -- err = snd_usb_endpoint_configure(chip, subs->data_endpoint); -+ err = snd_usb_endpoint_prepare(chip, subs->data_endpoint); - if (err < 0) - return err; - snd_usb_set_format_quirk(subs, subs->cur_audiofmt); - } else { - if (subs->sync_endpoint) { -- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint); -+ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint); - if (err < 0) - return err; - } -@@ -551,7 +551,13 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, - subs->cur_audiofmt = fmt; - mutex_unlock(&chip->mutex); - -- ret = configure_endpoints(chip, subs); -+ if (subs->sync_endpoint) { -+ ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint); -+ if (ret < 0) -+ goto unlock; -+ } -+ -+ ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint); - - unlock: - if (ret < 0) -diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c -index 9bfead5efc4c1..5b4d8f5eade20 100644 ---- a/sound/usb/quirks.c -+++ b/sound/usb/quirks.c -@@ -1764,7 +1764,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface) - - for (q = registration_quirks; q->usb_id; q++) - if (chip->usb_id == q->usb_id) -- return iface != q->interface; -+ return iface < q->interface; - - /* Register as normal */ - return false; -diff --git a/sound/usb/stream.c b/sound/usb/stream.c -index ceb93d798182c..f10f4e6d3fb85 100644 ---- a/sound/usb/stream.c -+++ b/sound/usb/stream.c -@@ -495,6 +495,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, - return 0; - } - } -+ -+ if (chip->card->registered) -+ chip->need_delayed_register = true; -+ - /* look for an empty stream */ - list_for_each_entry(as, &chip->pcm_list, list) { - if (as->fmt_type != fp->fmt_type) -@@ -502,9 +506,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip, - subs = &as->substream[stream]; - if (subs->ep_num) - continue; -- if (snd_device_get_state(chip->card, as->pcm) != -- SNDRV_DEV_BUILD) -- chip->need_delayed_register = true; - err = snd_pcm_new_stream(as->pcm, stream, 1); - if (err < 0) - return err; -@@ -1105,7 +1106,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip, - * Dallas DS4201 workaround: It presents 5 altsettings, but the last - * one misses syncpipe, and does not produce any sound. 
- */ -- if (chip->usb_id == USB_ID(0x04fa, 0x4201)) -+ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4) - num = 4; - - for (i = 0; i < num; i++) { -diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c -index e6c98a6e3908e..6b1bafe267a42 100644 ---- a/tools/lib/perf/evlist.c -+++ b/tools/lib/perf/evlist.c -@@ -486,6 +486,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - if (ops->idx) - ops->idx(evlist, evsel, mp, idx); - -+ pr_debug("idx %d: mmapping fd %d\n", idx, *output); - if (ops->mmap(map, mp, *output, evlist_cpu) < 0) - return -1; - -@@ -494,6 +495,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - if (!idx) - perf_evlist__set_mmap_first(evlist, map, overwrite); - } else { -+ pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output); - if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) - return -1; - -@@ -519,6 +521,48 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - return 0; - } - -+static int -+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, -+ struct perf_mmap_param *mp) -+{ -+ int nr_threads = perf_thread_map__nr(evlist->threads); -+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus); -+ int cpu, thread, idx = 0; -+ int nr_mmaps = 0; -+ -+ pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n", -+ __func__, nr_cpus, nr_threads); -+ -+ /* per-thread mmaps */ -+ for (thread = 0; thread < nr_threads; thread++, idx++) { -+ int output = -1; -+ int output_overwrite = -1; -+ -+ if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output, -+ &output_overwrite, &nr_mmaps)) -+ goto out_unmap; -+ } -+ -+ /* system-wide mmaps i.e. per-cpu */ -+ for (cpu = 1; cpu < nr_cpus; cpu++, idx++) { -+ int output = -1; -+ int output_overwrite = -1; -+ -+ if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output, -+ &output_overwrite, &nr_mmaps)) -+ goto out_unmap; -+ } -+ -+ if (nr_mmaps != evlist->nr_mmaps) -+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps); -+ -+ return 0; -+ -+out_unmap: -+ perf_evlist__munmap(evlist); -+ return -1; -+} -+ - static int - mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - struct perf_mmap_param *mp) -@@ -528,6 +572,8 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, - int nr_mmaps = 0; - int cpu, thread; - -+ pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads); -+ - for (cpu = 0; cpu < nr_cpus; cpu++) { - int output = -1; - int output_overwrite = -1; -@@ -569,6 +615,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist, - struct perf_evlist_mmap_ops *ops, - struct perf_mmap_param *mp) - { -+ const struct perf_cpu_map *cpus = evlist->all_cpus; - struct perf_evsel *evsel; - - if (!ops || !ops->get || !ops->mmap) -@@ -588,6 +635,9 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist, - if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) - return -ENOMEM; - -+ if (perf_cpu_map__empty(cpus)) -+ return mmap_per_thread(evlist, ops, mp); -+ - return mmap_per_cpu(evlist, ops, mp); - } - -diff --git a/tools/objtool/check.c b/tools/objtool/check.c -index 31c719f99f66e..5d87e0b0d85f9 100644 ---- a/tools/objtool/check.c -+++ b/tools/objtool/check.c -@@ -162,32 +162,34 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func, - - /* - * Unfortunately these have to be hard coded because the noreturn -- * attribute isn't provided in ELF data. 
-+ * attribute isn't provided in ELF data. Keep 'em sorted. - */ - static const char * const global_noreturns[] = { -+ "__invalid_creds", -+ "__module_put_and_kthread_exit", -+ "__reiserfs_panic", - "__stack_chk_fail", -- "panic", -+ "__ubsan_handle_builtin_unreachable", -+ "cpu_bringup_and_idle", -+ "cpu_startup_entry", - "do_exit", -+ "do_group_exit", - "do_task_dead", -- "kthread_exit", -- "make_task_dead", -- "__module_put_and_kthread_exit", -+ "ex_handler_msr_mce", -+ "fortify_panic", - "kthread_complete_and_exit", -- "__reiserfs_panic", -+ "kthread_exit", -+ "kunit_try_catch_throw", - "lbug_with_loc", -- "fortify_panic", -- "usercopy_abort", - "machine_real_restart", -+ "make_task_dead", -+ "panic", - "rewind_stack_and_make_dead", -- "kunit_try_catch_throw", -- "xen_start_kernel", -- "cpu_bringup_and_idle", -- "do_group_exit", -+ "sev_es_terminate", -+ "snp_abort", - "stop_this_cpu", -- "__invalid_creds", -- "cpu_startup_entry", -- "__ubsan_handle_builtin_unreachable", -- "ex_handler_msr_mce", -+ "usercopy_abort", -+ "xen_start_kernel", - }; - - if (!func) -diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c -index 68f681ad54c1e..777bdf182a582 100644 ---- a/tools/perf/arch/x86/util/evlist.c -+++ b/tools/perf/arch/x86/util/evlist.c -@@ -8,8 +8,13 @@ - #define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}" - #define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}" - --int arch_evlist__add_default_attrs(struct evlist *evlist) -+int arch_evlist__add_default_attrs(struct evlist *evlist, -+ struct perf_event_attr *attrs, -+ size_t nr_attrs) - { -+ if (nr_attrs) -+ return __evlist__add_default_attrs(evlist, attrs, nr_attrs); -+ - if (!pmu_have_event("cpu", "slots")) - return 0; - -diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c -index 9a71f0330137e..68c878b4e5e4c 100644 ---- a/tools/perf/builtin-record.c -+++ b/tools/perf/builtin-record.c -@@ -1892,14 +1892,18 @@ static int record__synthesize(struct record *rec, bool tail) - - err = perf_event__synthesize_bpf_events(session, process_synthesized_event, - machine, opts); -- if (err < 0) -+ if (err < 0) { - pr_warning("Couldn't synthesize bpf events.\n"); -+ err = 0; -+ } - - if (rec->opts.synth & PERF_SYNTH_CGROUP) { - err = perf_event__synthesize_cgroups(tool, process_synthesized_event, - machine); -- if (err < 0) -+ if (err < 0) { - pr_warning("Couldn't synthesize cgroup events.\n"); -+ err = 0; -+ } - } - - if (rec->opts.nr_threads_synthesize > 1) { -diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c -index c689054002cca..26a572c160d6f 100644 ---- a/tools/perf/builtin-script.c -+++ b/tools/perf/builtin-script.c -@@ -441,6 +441,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session) - struct perf_event_attr *attr = &evsel->core.attr; - bool allow_user_set; - -+ if (evsel__is_dummy_event(evsel)) -+ return 0; -+ - if (perf_header__has_feat(&session->header, HEADER_STAT)) - return 0; - -diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c -index 5f0333a8acd8a..82e14faecc3e4 100644 ---- a/tools/perf/builtin-stat.c -+++ b/tools/perf/builtin-stat.c -@@ -1778,6 +1778,9 @@ static int add_default_attributes(void) - (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | - (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, - }; -+ -+ struct perf_event_attr default_null_attrs[] = {}; -+ - 
/* Set attrs if no event is selected and !null_run: */ - if (stat_config.null_run) - return 0; -@@ -1941,6 +1944,9 @@ setup_metrics: - free(str); - } - -+ if (!stat_config.topdown_level) -+ stat_config.topdown_level = TOPDOWN_MAX_LEVEL; -+ - if (!evsel_list->core.nr_entries) { - if (target__has_cpu(&target)) - default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; -@@ -1957,9 +1963,8 @@ setup_metrics: - } - if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) - return -1; -- -- stat_config.topdown_level = TOPDOWN_MAX_LEVEL; -- if (arch_evlist__add_default_attrs(evsel_list) < 0) -+ /* Platform specific attrs */ -+ if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) - return -1; - } - -diff --git a/tools/perf/dlfilters/dlfilter-show-cycles.c b/tools/perf/dlfilters/dlfilter-show-cycles.c -index 9eccc97bff82f..6d47298ebe9f6 100644 ---- a/tools/perf/dlfilters/dlfilter-show-cycles.c -+++ b/tools/perf/dlfilters/dlfilter-show-cycles.c -@@ -98,9 +98,9 @@ int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, vo - static void print_vals(__u64 cycles, __u64 delta) - { - if (delta) -- printf("%10llu %10llu ", cycles, delta); -+ printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta); - else -- printf("%10llu %10s ", cycles, ""); -+ printf("%10llu %10s ", (unsigned long long)cycles, ""); - } - - int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx) -diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c -index 48af7d379d822..efa5f006b5c61 100644 ---- a/tools/perf/util/evlist.c -+++ b/tools/perf/util/evlist.c -@@ -342,9 +342,14 @@ int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *a - return evlist__add_attrs(evlist, attrs, nr_attrs); - } - --__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused) -+__weak int arch_evlist__add_default_attrs(struct evlist *evlist, -+ struct perf_event_attr *attrs, -+ size_t nr_attrs) - { -- return 0; -+ if (!nr_attrs) -+ return 0; -+ -+ return __evlist__add_default_attrs(evlist, attrs, nr_attrs); - } - - struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id) -diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h -index 1bde9ccf4e7da..129095c0fe6d3 100644 ---- a/tools/perf/util/evlist.h -+++ b/tools/perf/util/evlist.h -@@ -107,10 +107,13 @@ static inline int evlist__add_default(struct evlist *evlist) - int __evlist__add_default_attrs(struct evlist *evlist, - struct perf_event_attr *attrs, size_t nr_attrs); - -+int arch_evlist__add_default_attrs(struct evlist *evlist, -+ struct perf_event_attr *attrs, -+ size_t nr_attrs); -+ - #define evlist__add_default_attrs(evlist, array) \ -- __evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) -+ arch_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) - --int arch_evlist__add_default_attrs(struct evlist *evlist); - struct evsel *arch_evlist__leader(struct list_head *list); - - int evlist__add_dummy(struct evlist *evlist); diff --git a/sys-kernel/pinephone-sources/files/5.19.9-10.patch b/sys-kernel/pinephone-sources/files/5.19.9-10.patch deleted file mode 100644 index 331692b..0000000 --- a/sys-kernel/pinephone-sources/files/5.19.9-10.patch +++ /dev/null @@ -1,1723 +0,0 @@ -diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml -index b6bbc312a7cf7..1414ba9977c16 100644 ---- 
a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml -+++ b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml -@@ -24,8 +24,10 @@ properties: - - interrupts: - minItems: 1 -+ maxItems: 2 - description: - Should be configured with type IRQ_TYPE_EDGE_RISING. -+ If two interrupts are provided, expected order is INT1 and INT2. - - required: - - compatible -diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst -index f615906a0821b..6d721396717a2 100644 ---- a/Documentation/input/joydev/joystick.rst -+++ b/Documentation/input/joydev/joystick.rst -@@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes: - * AVB Mag Turbo Force - * AVB Top Shot Pegasus - * AVB Top Shot Force Feedback Racing Wheel -+* Boeder Force Feedback Wheel - * Logitech WingMan Force - * Logitech WingMan Force Wheel - * Guillemot Race Leader Force Feedback -diff --git a/Makefile b/Makefile -index 1f27c4bd09e67..33a9b6b547c47 100644 ---- a/Makefile -+++ b/Makefile -@@ -1,7 +1,7 @@ - # SPDX-License-Identifier: GPL-2.0 - VERSION = 5 - PATCHLEVEL = 19 --SUBLEVEL = 9 -+SUBLEVEL = 10 - EXTRAVERSION = - NAME = Superb Owl - -diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig -index 62b5b07fa4e1c..ca64bf5f5b038 100644 ---- a/arch/loongarch/Kconfig -+++ b/arch/loongarch/Kconfig -@@ -36,6 +36,7 @@ config LOONGARCH - select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION - select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION -+ select ARCH_KEEP_MEMBLOCK - select ARCH_MIGHT_HAVE_PC_PARPORT - select ARCH_MIGHT_HAVE_PC_SERIO - select ARCH_SPARSEMEM_ENABLE -diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h -index 62044cd5b7bc5..825c2519b9d1f 100644 ---- a/arch/loongarch/include/asm/acpi.h -+++ b/arch/loongarch/include/asm/acpi.h -@@ -15,7 +15,7 @@ extern int acpi_pci_disabled; - extern int acpi_noirq; - - #define acpi_os_ioremap acpi_os_ioremap --void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); -+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); - - static inline void disable_acpi(void) - { -diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c -index bb729ee8a2370..796a24055a942 100644 ---- a/arch/loongarch/kernel/acpi.c -+++ b/arch/loongarch/kernel/acpi.c -@@ -113,7 +113,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) - early_memunmap(map, size); - } - --void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) -+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) - { - if (!memblock_is_memory(phys)) - return ioremap(phys, size); -diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c -index 7094a68c9b832..3c3fbff0b8f86 100644 ---- a/arch/loongarch/mm/init.c -+++ b/arch/loongarch/mm/init.c -@@ -131,18 +131,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) - return ret; - } - --#ifdef CONFIG_NUMA --int memory_add_physaddr_to_nid(u64 start) --{ -- int nid; -- -- nid = pa_to_nid(start); -- return nid; --} --EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); --#endif -- --#ifdef CONFIG_MEMORY_HOTREMOVE - void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) - { - unsigned long start_pfn = start >> PAGE_SHIFT; -@@ -154,6 +142,16 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) - page += vmem_altmap_offset(altmap); - 
__remove_pages(start_pfn, nr_pages, altmap); - } -+ -+#ifdef CONFIG_NUMA -+int memory_add_physaddr_to_nid(u64 start) -+{ -+ int nid; -+ -+ nid = pa_to_nid(start); -+ return nid; -+} -+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); - #endif - #endif - -diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c -index 356226c7ebbdc..aa1ba803659cd 100644 ---- a/arch/x86/kvm/mmu/mmu.c -+++ b/arch/x86/kvm/mmu/mmu.c -@@ -5907,47 +5907,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, - const struct kvm_memory_slot *memslot, - int start_level) - { -- bool flush = false; -- - if (kvm_memslots_have_rmaps(kvm)) { - write_lock(&kvm->mmu_lock); -- flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, -- start_level, KVM_MAX_HUGEPAGE_LEVEL, -- false); -+ slot_handle_level(kvm, memslot, slot_rmap_write_protect, -+ start_level, KVM_MAX_HUGEPAGE_LEVEL, false); - write_unlock(&kvm->mmu_lock); - } - - if (is_tdp_mmu_enabled(kvm)) { - read_lock(&kvm->mmu_lock); -- flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level); -+ kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level); - read_unlock(&kvm->mmu_lock); - } -- -- /* -- * Flush TLBs if any SPTEs had to be write-protected to ensure that -- * guest writes are reflected in the dirty bitmap before the memslot -- * update completes, i.e. before enabling dirty logging is visible to -- * userspace. -- * -- * Perform the TLB flush outside the mmu_lock to reduce the amount of -- * time the lock is held. However, this does mean that another CPU can -- * now grab mmu_lock and encounter a write-protected SPTE while CPUs -- * still have a writable mapping for the associated GFN in their TLB. -- * -- * This is safe but requires KVM to be careful when making decisions -- * based on the write-protection status of an SPTE. Specifically, KVM -- * also write-protects SPTEs to monitor changes to guest page tables -- * during shadow paging, and must guarantee no CPUs can write to those -- * page before the lock is dropped. As mentioned in the previous -- * paragraph, a write-protected SPTE is no guarantee that CPU cannot -- * perform writes. So to determine if a TLB flush is truly required, KVM -- * will clear a separate software-only bit (MMU-writable) and skip the -- * flush if-and-only-if this bit was already clear. -- * -- * See is_writable_pte() for more details. -- */ -- if (flush) -- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); - } - - /* Must be called with the mmu_lock held in write-mode. */ -@@ -6070,32 +6041,30 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, - void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, - const struct kvm_memory_slot *memslot) - { -- bool flush = false; -- - if (kvm_memslots_have_rmaps(kvm)) { - write_lock(&kvm->mmu_lock); - /* - * Clear dirty bits only on 4k SPTEs since the legacy MMU only - * support dirty logging at a 4k granularity. - */ -- flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false); -+ slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false); - write_unlock(&kvm->mmu_lock); - } - - if (is_tdp_mmu_enabled(kvm)) { - read_lock(&kvm->mmu_lock); -- flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); -+ kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); - read_unlock(&kvm->mmu_lock); - } - - /* -+ * The caller will flush the TLBs after this function returns. 
-+ * - * It's also safe to flush TLBs out of mmu lock here as currently this - * function is only used for dirty logging, in which case flushing TLB - * out of mmu lock also guarantees no dirty pages will be lost in - * dirty_bitmap. - */ -- if (flush) -- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); - } - - void kvm_mmu_zap_all(struct kvm *kvm) -diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h -index f80dbb628df57..e09bdcf1e47c5 100644 ---- a/arch/x86/kvm/mmu/spte.h -+++ b/arch/x86/kvm/mmu/spte.h -@@ -326,7 +326,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, - } - - /* -- * An shadow-present leaf SPTE may be non-writable for 3 possible reasons: -+ * A shadow-present leaf SPTE may be non-writable for 4 possible reasons: - * - * 1. To intercept writes for dirty logging. KVM write-protects huge pages - * so that they can be split be split down into the dirty logging -@@ -344,8 +344,13 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, - * read-only memslot or guest memory backed by a read-only VMA. Writes to - * such pages are disallowed entirely. - * -- * To keep track of why a given SPTE is write-protected, KVM uses 2 -- * software-only bits in the SPTE: -+ * 4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this -+ * case, the SPTE is access-protected, not just write-protected! -+ * -+ * For cases #1 and #4, KVM can safely make such SPTEs writable without taking -+ * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it. -+ * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits -+ * in the SPTE: - * - * shadow_mmu_writable_mask, aka MMU-writable - - * Cleared on SPTEs that KVM is currently write-protecting for shadow paging -@@ -374,7 +379,8 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, - * shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging - * (which does not clear the MMU-writable bit), does not flush TLBs before - * dropping the lock, as it only needs to synchronize guest writes with the -- * dirty bitmap. -+ * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for -+ * access-tracking via the clear_young() MMU notifier also does not flush TLBs. - * - * So, there is the problem: clearing the MMU-writable bit can encounter a - * write-protected SPTE while CPUs still have writable mappings for that SPTE -diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c -index 55de0d1981e52..5b36866528568 100644 ---- a/arch/x86/kvm/x86.c -+++ b/arch/x86/kvm/x86.c -@@ -12265,6 +12265,50 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm, - } else { - kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K); - } -+ -+ /* -+ * Unconditionally flush the TLBs after enabling dirty logging. -+ * A flush is almost always going to be necessary (see below), -+ * and unconditionally flushing allows the helpers to omit -+ * the subtly complex checks when removing write access. -+ * -+ * Do the flush outside of mmu_lock to reduce the amount of -+ * time mmu_lock is held. Flushing after dropping mmu_lock is -+ * safe as KVM only needs to guarantee the slot is fully -+ * write-protected before returning to userspace, i.e. before -+ * userspace can consume the dirty status. -+ * -+ * Flushing outside of mmu_lock requires KVM to be careful when -+ * making decisions based on writable status of an SPTE, e.g. a -+ * !writable SPTE doesn't guarantee a CPU can't perform writes. 
-+ * -+ * Specifically, KVM also write-protects guest page tables to -+ * monitor changes when using shadow paging, and must guarantee -+ * no CPUs can write to those page before mmu_lock is dropped. -+ * Because CPUs may have stale TLB entries at this point, a -+ * !writable SPTE doesn't guarantee CPUs can't perform writes. -+ * -+ * KVM also allows making SPTES writable outside of mmu_lock, -+ * e.g. to allow dirty logging without taking mmu_lock. -+ * -+ * To handle these scenarios, KVM uses a separate software-only -+ * bit (MMU-writable) to track if a SPTE is !writable due to -+ * a guest page table being write-protected (KVM clears the -+ * MMU-writable flag when write-protecting for shadow paging). -+ * -+ * The use of MMU-writable is also the primary motivation for -+ * the unconditional flush. Because KVM must guarantee that a -+ * CPU doesn't contain stale, writable TLB entries for a -+ * !MMU-writable SPTE, KVM must flush if it encounters any -+ * MMU-writable SPTE regardless of whether the actual hardware -+ * writable bit was set. I.e. KVM is almost guaranteed to need -+ * to flush, while unconditionally flushing allows the "remove -+ * write access" helpers to ignore MMU-writable entirely. -+ * -+ * See is_writable_pte() for more details (the case involving -+ * access-tracked SPTEs is particularly relevant). -+ */ -+ kvm_arch_flush_remote_tlbs_memslot(kvm, new); - } - } - -diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c -index c2d4947844250..510cdec375c4d 100644 ---- a/drivers/acpi/resource.c -+++ b/drivers/acpi/resource.c -@@ -416,6 +416,16 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, - { - int i; - -+#ifdef CONFIG_X86 -+ /* -+ * IRQ override isn't needed on modern AMD Zen systems and -+ * this override breaks active low IRQs on AMD Ryzen 6000 and -+ * newer systems. Skip it. 
-+ */ -+ if (boot_cpu_has(X86_FEATURE_ZEN)) -+ return false; -+#endif -+ - for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) { - const struct irq_override_cmp *entry = &skip_override_table[i]; - -diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c -index f118ad9bcd33d..0e95351d47d49 100644 ---- a/drivers/gpio/gpio-104-dio-48e.c -+++ b/drivers/gpio/gpio-104-dio-48e.c -@@ -271,6 +271,7 @@ static void dio48e_irq_mask(struct irq_data *data) - dio48egpio->irq_mask &= ~BIT(0); - else - dio48egpio->irq_mask &= ~BIT(1); -+ gpiochip_disable_irq(chip, offset); - - if (!dio48egpio->irq_mask) - /* disable interrupts */ -@@ -298,6 +299,7 @@ static void dio48e_irq_unmask(struct irq_data *data) - iowrite8(0x00, dio48egpio->base + 0xB); - } - -+ gpiochip_enable_irq(chip, offset); - if (offset == 19) - dio48egpio->irq_mask |= BIT(0); - else -@@ -320,12 +322,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type) - return 0; - } - --static struct irq_chip dio48e_irqchip = { -+static const struct irq_chip dio48e_irqchip = { - .name = "104-dio-48e", - .irq_ack = dio48e_irq_ack, - .irq_mask = dio48e_irq_mask, - .irq_unmask = dio48e_irq_unmask, -- .irq_set_type = dio48e_irq_set_type -+ .irq_set_type = dio48e_irq_set_type, -+ .flags = IRQCHIP_IMMUTABLE, -+ GPIOCHIP_IRQ_RESOURCE_HELPERS, - }; - - static irqreturn_t dio48e_irq_handler(int irq, void *dev_id) -@@ -414,7 +418,7 @@ static int dio48e_probe(struct device *dev, unsigned int id) - dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple; - - girq = &dio48egpio->chip.irq; -- girq->chip = &dio48e_irqchip; -+ gpio_irq_chip_set_chip(girq, &dio48e_irqchip); - /* This will let us handle the parent IRQ in the driver */ - girq->parent_handler = NULL; - girq->num_parents = 0; -diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c -index 45f7ad8573e19..a8b7c8eafac5a 100644 ---- a/drivers/gpio/gpio-104-idio-16.c -+++ b/drivers/gpio/gpio-104-idio-16.c -@@ -150,10 +150,11 @@ static void idio_16_irq_mask(struct irq_data *data) - { - struct gpio_chip *chip = irq_data_get_irq_chip_data(data); - struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); -- const unsigned long mask = BIT(irqd_to_hwirq(data)); -+ const unsigned long offset = irqd_to_hwirq(data); - unsigned long flags; - -- idio16gpio->irq_mask &= ~mask; -+ idio16gpio->irq_mask &= ~BIT(offset); -+ gpiochip_disable_irq(chip, offset); - - if (!idio16gpio->irq_mask) { - raw_spin_lock_irqsave(&idio16gpio->lock, flags); -@@ -168,11 +169,12 @@ static void idio_16_irq_unmask(struct irq_data *data) - { - struct gpio_chip *chip = irq_data_get_irq_chip_data(data); - struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip); -- const unsigned long mask = BIT(irqd_to_hwirq(data)); -+ const unsigned long offset = irqd_to_hwirq(data); - const unsigned long prev_irq_mask = idio16gpio->irq_mask; - unsigned long flags; - -- idio16gpio->irq_mask |= mask; -+ gpiochip_enable_irq(chip, offset); -+ idio16gpio->irq_mask |= BIT(offset); - - if (!prev_irq_mask) { - raw_spin_lock_irqsave(&idio16gpio->lock, flags); -@@ -193,12 +195,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type) - return 0; - } - --static struct irq_chip idio_16_irqchip = { -+static const struct irq_chip idio_16_irqchip = { - .name = "104-idio-16", - .irq_ack = idio_16_irq_ack, - .irq_mask = idio_16_irq_mask, - .irq_unmask = idio_16_irq_unmask, -- .irq_set_type = idio_16_irq_set_type -+ .irq_set_type = idio_16_irq_set_type, -+ .flags = 
IRQCHIP_IMMUTABLE, -+ GPIOCHIP_IRQ_RESOURCE_HELPERS, - }; - - static irqreturn_t idio_16_irq_handler(int irq, void *dev_id) -@@ -275,7 +279,7 @@ static int idio_16_probe(struct device *dev, unsigned int id) - idio16gpio->out_state = 0xFFFF; - - girq = &idio16gpio->chip.irq; -- girq->chip = &idio_16_irqchip; -+ gpio_irq_chip_set_chip(girq, &idio_16_irqchip); - /* This will let us handle the parent IRQ in the driver */ - girq->parent_handler = NULL; - girq->num_parents = 0; -diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c -index 8943cea927642..a2e505a7545cd 100644 ---- a/drivers/gpio/gpio-mockup.c -+++ b/drivers/gpio/gpio-mockup.c -@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev, - } - } - -+static void gpio_mockup_debugfs_cleanup(void *data) -+{ -+ struct gpio_mockup_chip *chip = data; -+ -+ debugfs_remove_recursive(chip->dbg_dir); -+} -+ - static void gpio_mockup_dispose_mappings(void *data) - { - struct gpio_mockup_chip *chip = data; -@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev) - - gpio_mockup_debugfs_setup(dev, chip); - -- return 0; -+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip); - } - - static const struct of_device_id gpio_mockup_of_match[] = { -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c -index ecada5eadfe35..e325150879df7 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c -@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev) - return true; - case CHIP_SIENNA_CICHLID: - if (strnstr(atom_ctx->vbios_version, "D603", -+ sizeof(atom_ctx->vbios_version))) { -+ if (strnstr(atom_ctx->vbios_version, "D603GLXE", - sizeof(atom_ctx->vbios_version))) -- return true; -- else -+ return false; -+ else -+ return true; -+ } else { - return false; -+ } - default: - return false; - } -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -index 2b00f8fe15a89..b19bf0c3f3737 100644 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c -@@ -2372,7 +2372,7 @@ static int psp_load_smu_fw(struct psp_context *psp) - static bool fw_load_skip_check(struct psp_context *psp, - struct amdgpu_firmware_info *ucode) - { -- if (!ucode->fw) -+ if (!ucode->fw || !ucode->ucode_size) - return true; - - if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && -diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c -index 9cde13b07dd26..d9a5209aa8433 100644 ---- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c -+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c -@@ -382,11 +382,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu) - return 0; - } - -+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu, -+ void **table, -+ uint32_t *size) -+{ -+ struct smu_table_context *smu_table = &smu->smu_table; -+ void *combo_pptable = smu_table->combo_pptable; -+ int ret = 0; -+ -+ ret = smu_cmn_get_combo_pptable(smu); -+ if (ret) -+ return ret; -+ -+ *table = combo_pptable; -+ *size = sizeof(struct smu_13_0_7_powerplay_table); -+ -+ return 0; -+} - - static int smu_v13_0_7_setup_pptable(struct smu_context *smu) - { - struct smu_table_context *smu_table = &smu->smu_table; -- void *combo_pptable = smu_table->combo_pptable; - struct amdgpu_device *adev = smu->adev; - int ret = 0; - 
-@@ -395,18 +411,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu) - * be used directly by driver. To get the raw pptable, we need to - * rely on the combo pptable(and its revelant SMU message). - */ -- if (adev->scpm_enabled) { -- ret = smu_cmn_get_combo_pptable(smu); -- if (ret) -- return ret; -- -- smu->smu_table.power_play_table = combo_pptable; -- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table); -- } else { -- ret = smu_v13_0_setup_pptable(smu); -- if (ret) -- return ret; -- } -+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu, -+ &smu_table->power_play_table, -+ &smu_table->power_play_table_size); -+ if (ret) -+ return ret; - - ret = smu_v13_0_7_store_powerplay_table(smu); - if (ret) -diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c -index a92ffde53f0b3..db2f847c8535f 100644 ---- a/drivers/gpu/drm/msm/msm_rd.c -+++ b/drivers/gpu/drm/msm/msm_rd.c -@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file) - file->private_data = rd; - rd->open = true; - -+ /* Reset fifo to clear any previously unread data: */ -+ rd->fifo.head = rd->fifo.tail = 0; -+ - /* the parsing tools need to know gpu-id to know which - * register database to load. - * -diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h -index 6a5cc11aefd89..35dddc5015b37 100644 ---- a/drivers/hid/intel-ish-hid/ishtp-hid.h -+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h -@@ -105,7 +105,7 @@ struct report_list { - * @multi_packet_cnt: Count of fragmented packet count - * - * This structure is used to store completion flags and per client data like -- * like report description, number of HID devices etc. -+ * report description, number of HID devices etc. - */ - struct ishtp_cl_data { - /* completion flags */ -diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c -index 405e0d5212cc8..df0a825694f52 100644 ---- a/drivers/hid/intel-ish-hid/ishtp/client.c -+++ b/drivers/hid/intel-ish-hid/ishtp/client.c -@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb) - } - - /** -- * ipc_tx_callback() - IPC tx callback function -+ * ipc_tx_send() - IPC tx send function - * @prm: Pointer to client device instance - * -- * Send message over IPC either first time or on callback on previous message -- * completion -+ * Send message over IPC. Message will be split into fragments -+ * if message size is bigger than IPC FIFO size, and all -+ * fragments will be sent one by one. 
- */ --static void ipc_tx_callback(void *prm) -+static void ipc_tx_send(void *prm) - { - struct ishtp_cl *cl = prm; - struct ishtp_cl_tx_ring *cl_msg; -@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm) - list); - rem = cl_msg->send_buf.size - cl->tx_offs; - -- ishtp_hdr.host_addr = cl->host_client_id; -- ishtp_hdr.fw_addr = cl->fw_client_id; -- ishtp_hdr.reserved = 0; -- pmsg = cl_msg->send_buf.data + cl->tx_offs; -+ while (rem > 0) { -+ ishtp_hdr.host_addr = cl->host_client_id; -+ ishtp_hdr.fw_addr = cl->fw_client_id; -+ ishtp_hdr.reserved = 0; -+ pmsg = cl_msg->send_buf.data + cl->tx_offs; -+ -+ if (rem <= dev->mtu) { -+ /* Last fragment or only one packet */ -+ ishtp_hdr.length = rem; -+ ishtp_hdr.msg_complete = 1; -+ /* Submit to IPC queue with no callback */ -+ ishtp_write_message(dev, &ishtp_hdr, pmsg); -+ cl->tx_offs = 0; -+ cl->sending = 0; - -- if (rem <= dev->mtu) { -- ishtp_hdr.length = rem; -- ishtp_hdr.msg_complete = 1; -- cl->sending = 0; -- list_del_init(&cl_msg->list); /* Must be before write */ -- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); -- /* Submit to IPC queue with no callback */ -- ishtp_write_message(dev, &ishtp_hdr, pmsg); -- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); -- list_add_tail(&cl_msg->list, &cl->tx_free_list.list); -- ++cl->tx_ring_free_size; -- spin_unlock_irqrestore(&cl->tx_free_list_spinlock, -- tx_free_flags); -- } else { -- /* Send IPC fragment */ -- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); -- cl->tx_offs += dev->mtu; -- ishtp_hdr.length = dev->mtu; -- ishtp_hdr.msg_complete = 0; -- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl); -+ break; -+ } else { -+ /* Send ipc fragment */ -+ ishtp_hdr.length = dev->mtu; -+ ishtp_hdr.msg_complete = 0; -+ /* All fregments submitted to IPC queue with no callback */ -+ ishtp_write_message(dev, &ishtp_hdr, pmsg); -+ cl->tx_offs += dev->mtu; -+ rem = cl_msg->send_buf.size - cl->tx_offs; -+ } - } -+ -+ list_del_init(&cl_msg->list); -+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); -+ -+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); -+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list); -+ ++cl->tx_ring_free_size; -+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, -+ tx_free_flags); - } - - /** -@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev, - return; - - cl->tx_offs = 0; -- ipc_tx_callback(cl); -+ ipc_tx_send(cl); - ++cl->send_msg_cnt_ipc; - } - -diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c -index d003ad864ee44..a6e5d350a94ce 100644 ---- a/drivers/infiniband/hw/irdma/uk.c -+++ b/drivers/infiniband/hw/irdma/uk.c -@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, - FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); - i = 0; - } else { -- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list, -+ qp->wqe_ops.iw_set_fragment(wqe, 0, -+ frag_cnt ? 
op_info->sg_list : NULL, - qp->swqe_polarity); - i = 1; - } -diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c -index 08371a80fdc26..be189e0525de6 100644 ---- a/drivers/infiniband/hw/mlx5/cq.c -+++ b/drivers/infiniband/hw/mlx5/cq.c -@@ -523,6 +523,10 @@ repoll: - "Requestor" : "Responder", cq->mcq.cqn); - mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n", - err_cqe->syndrome, err_cqe->vendor_err_synd); -+ if (wc->status != IB_WC_WR_FLUSH_ERR && -+ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR) -+ dev->umrc.state = MLX5_UMR_STATE_RECOVER; -+ - if (opcode == MLX5_CQE_REQ_ERR) { - wq = &(*cur_qp)->sq; - wqe_ctr = be16_to_cpu(cqe64->wqe_counter); -diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c -index 63c89a72cc352..bb13164124fdb 100644 ---- a/drivers/infiniband/hw/mlx5/main.c -+++ b/drivers/infiniband/hw/mlx5/main.c -@@ -4336,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev, - dev->mdev = mdev; - dev->num_ports = num_ports; - -- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev)) -+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev)) - profile = &raw_eth_profile; - else - profile = &pf_profile; -diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h -index 998b67509a533..c2cca032a6ed4 100644 ---- a/drivers/infiniband/hw/mlx5/mlx5_ib.h -+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h -@@ -717,13 +717,24 @@ struct mlx5_ib_umr_context { - struct completion done; - }; - -+enum { -+ MLX5_UMR_STATE_UNINIT, -+ MLX5_UMR_STATE_ACTIVE, -+ MLX5_UMR_STATE_RECOVER, -+ MLX5_UMR_STATE_ERR, -+}; -+ - struct umr_common { - struct ib_pd *pd; - struct ib_cq *cq; - struct ib_qp *qp; -- /* control access to UMR QP -+ /* Protects from UMR QP overflow - */ - struct semaphore sem; -+ /* Protects from using UMR while the UMR is not active -+ */ -+ struct mutex lock; -+ unsigned int state; - }; - - struct mlx5_cache_ent { -diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c -index 3a48364c09181..d5105b5c9979b 100644 ---- a/drivers/infiniband/hw/mlx5/umr.c -+++ b/drivers/infiniband/hw/mlx5/umr.c -@@ -176,6 +176,8 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev) - dev->umrc.pd = pd; - - sema_init(&dev->umrc.sem, MAX_UMR_WR); -+ mutex_init(&dev->umrc.lock); -+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE; - - return 0; - -@@ -190,11 +192,38 @@ destroy_pd: - - void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev) - { -+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT) -+ return; - ib_destroy_qp(dev->umrc.qp); - ib_free_cq(dev->umrc.cq); - ib_dealloc_pd(dev->umrc.pd); - } - -+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev) -+{ -+ struct umr_common *umrc = &dev->umrc; -+ struct ib_qp_attr attr; -+ int err; -+ -+ attr.qp_state = IB_QPS_RESET; -+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE); -+ if (err) { -+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); -+ goto err; -+ } -+ -+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp); -+ if (err) -+ goto err; -+ -+ umrc->state = MLX5_UMR_STATE_ACTIVE; -+ return 0; -+ -+err: -+ umrc->state = MLX5_UMR_STATE_ERR; -+ return err; -+} -+ - static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, - struct mlx5r_umr_wqe *wqe, bool with_data) - { -@@ -231,7 +260,7 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, - - id.ib_cqe = cqe; - mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0, -- MLX5_FENCE_MODE_NONE, MLX5_OPCODE_UMR); -+ 
MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR); - - mlx5r_ring_db(qp, 1, ctrl); - -@@ -270,17 +299,49 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey, - mlx5r_umr_init_context(&umr_context); - - down(&umrc->sem); -- err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe, -- with_data); -- if (err) -- mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err); -- else { -- wait_for_completion(&umr_context.done); -- if (umr_context.status != IB_WC_SUCCESS) { -- mlx5_ib_warn(dev, "reg umr failed (%u)\n", -- umr_context.status); -+ while (true) { -+ mutex_lock(&umrc->lock); -+ if (umrc->state == MLX5_UMR_STATE_ERR) { -+ mutex_unlock(&umrc->lock); - err = -EFAULT; -+ break; -+ } -+ -+ if (umrc->state == MLX5_UMR_STATE_RECOVER) { -+ mutex_unlock(&umrc->lock); -+ usleep_range(3000, 5000); -+ continue; -+ } -+ -+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe, -+ with_data); -+ mutex_unlock(&umrc->lock); -+ if (err) { -+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n", -+ err); -+ break; - } -+ -+ wait_for_completion(&umr_context.done); -+ -+ if (umr_context.status == IB_WC_SUCCESS) -+ break; -+ -+ if (umr_context.status == IB_WC_WR_FLUSH_ERR) -+ continue; -+ -+ WARN_ON_ONCE(1); -+ mlx5_ib_warn(dev, -+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n", -+ umr_context.status); -+ mutex_lock(&umrc->lock); -+ err = mlx5r_umr_recover(dev); -+ mutex_unlock(&umrc->lock); -+ if (err) -+ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n", -+ err); -+ err = -EFAULT; -+ break; - } - up(&umrc->sem); - return err; -diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c -index b2a68bc9f0b4d..b86de1312512b 100644 ---- a/drivers/input/joystick/iforce/iforce-main.c -+++ b/drivers/input/joystick/iforce/iforce-main.c -@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = { - { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce }, - { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce }, - { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce }, -+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce }, - { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? - { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? - { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, -diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c -index 40ac3a78d90ef..c0464959cbcdb 100644 ---- a/drivers/iommu/intel/iommu.c -+++ b/drivers/iommu/intel/iommu.c -@@ -168,38 +168,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re) - return re->hi & VTD_PAGE_MASK; - } - --static inline void context_clear_pasid_enable(struct context_entry *context) --{ -- context->lo &= ~(1ULL << 11); --} -- --static inline bool context_pasid_enabled(struct context_entry *context) --{ -- return !!(context->lo & (1ULL << 11)); --} -- --static inline void context_set_copied(struct context_entry *context) --{ -- context->hi |= (1ull << 3); --} -- --static inline bool context_copied(struct context_entry *context) --{ -- return !!(context->hi & (1ULL << 3)); --} -- --static inline bool __context_present(struct context_entry *context) --{ -- return (context->lo & 1); --} -- --bool context_present(struct context_entry *context) --{ -- return context_pasid_enabled(context) ? 
-- __context_present(context) : -- __context_present(context) && !context_copied(context); --} -- - static inline void context_set_present(struct context_entry *context) - { - context->lo |= 1; -@@ -247,6 +215,26 @@ static inline void context_clear_entry(struct context_entry *context) - context->hi = 0; - } - -+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) -+{ -+ if (!iommu->copied_tables) -+ return false; -+ -+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables); -+} -+ -+static inline void -+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) -+{ -+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables); -+} -+ -+static inline void -+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn) -+{ -+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables); -+} -+ - /* - * This domain is a statically identity mapping domain. - * 1. This domain creats a static 1:1 mapping to all usable memory. -@@ -644,6 +632,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, - struct context_entry *context; - u64 *entry; - -+ /* -+ * Except that the caller requested to allocate a new entry, -+ * returning a copied context entry makes no sense. -+ */ -+ if (!alloc && context_copied(iommu, bus, devfn)) -+ return NULL; -+ - entry = &root->lo; - if (sm_supported(iommu)) { - if (devfn >= 0x80) { -@@ -1770,6 +1765,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu) - iommu->domain_ids = NULL; - } - -+ if (iommu->copied_tables) { -+ bitmap_free(iommu->copied_tables); -+ iommu->copied_tables = NULL; -+ } -+ - g_iommus[iommu->seq_id] = NULL; - - /* free context mapping */ -@@ -1978,7 +1978,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, - goto out_unlock; - - ret = 0; -- if (context_present(context)) -+ if (context_present(context) && !context_copied(iommu, bus, devfn)) - goto out_unlock; - - /* -@@ -1990,7 +1990,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, - * in-flight DMA will exist, and we don't need to worry anymore - * hereafter. - */ -- if (context_copied(context)) { -+ if (context_copied(iommu, bus, devfn)) { - u16 did_old = context_domain_id(context); - - if (did_old < cap_ndoms(iommu->cap)) { -@@ -2001,6 +2001,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, - iommu->flush.flush_iotlb(iommu, did_old, 0, 0, - DMA_TLB_DSI_FLUSH); - } -+ -+ clear_context_copied(iommu, bus, devfn); - } - - context_clear_entry(context); -@@ -2783,32 +2785,14 @@ static int copy_context_table(struct intel_iommu *iommu, - /* Now copy the context entry */ - memcpy(&ce, old_ce + idx, sizeof(ce)); - -- if (!__context_present(&ce)) -+ if (!context_present(&ce)) - continue; - - did = context_domain_id(&ce); - if (did >= 0 && did < cap_ndoms(iommu->cap)) - set_bit(did, iommu->domain_ids); - -- /* -- * We need a marker for copied context entries. This -- * marker needs to work for the old format as well as -- * for extended context entries. -- * -- * Bit 67 of the context entry is used. In the old -- * format this bit is available to software, in the -- * extended format it is the PGE bit, but PGE is ignored -- * by HW if PASIDs are disabled (and thus still -- * available). -- * -- * So disable PASIDs first and then mark the entry -- * copied. This means that we don't copy PASID -- * translations from the old kernel, but this is fine as -- * faults there are not fatal. 
-- */ -- context_clear_pasid_enable(&ce); -- context_set_copied(&ce); -- -+ set_context_copied(iommu, bus, devfn); - new_ce[idx] = ce; - } - -@@ -2835,8 +2819,8 @@ static int copy_translation_tables(struct intel_iommu *iommu) - bool new_ext, ext; - - rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); -- ext = !!(rtaddr_reg & DMA_RTADDR_RTT); -- new_ext = !!ecap_ecs(iommu->ecap); -+ ext = !!(rtaddr_reg & DMA_RTADDR_SMT); -+ new_ext = !!sm_supported(iommu); - - /* - * The RTT bit can only be changed when translation is disabled, -@@ -2847,6 +2831,10 @@ static int copy_translation_tables(struct intel_iommu *iommu) - if (new_ext != ext) - return -EINVAL; - -+ iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); -+ if (!iommu->copied_tables) -+ return -ENOMEM; -+ - old_rt_phys = rtaddr_reg & VTD_PAGE_MASK; - if (!old_rt_phys) - return -EINVAL; -diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c -index c28f8cc00d1cf..a9cc85882b315 100644 ---- a/drivers/net/ethernet/broadcom/tg3.c -+++ b/drivers/net/ethernet/broadcom/tg3.c -@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev) - struct net_device *dev = pci_get_drvdata(pdev); - struct tg3 *tp = netdev_priv(dev); - -+ tg3_reset_task_cancel(tp); -+ - rtnl_lock(); -+ - netif_device_detach(dev); - - if (netif_running(dev)) - dev_close(dev); - -- if (system_state == SYSTEM_POWER_OFF) -- tg3_power_down(tp); -+ tg3_power_down(tp); - - rtnl_unlock(); -+ -+ pci_disable_device(pdev); - } - - /** -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c -index cfb8bedba5124..079fa44ada71e 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c -@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) - sw_owner_id[i]); - } - -+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) && -+ dev->priv.sw_vhca_id > 0) -+ MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id); -+ - return mlx5_cmd_exec_in(dev, init_hca, in); - } - -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c -index 616207c3b187a..6c8bb74bd8fc6 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/main.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c -@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444); - MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); - - static u32 sw_owner_id[4]; -+#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1) -+static DEFINE_IDA(sw_vhca_ida); - - enum { - MLX5_ATOMIC_REQ_MODE_BE = 0x0, -@@ -499,6 +501,49 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev) - return err; - } - -+bool mlx5_is_roce_on(struct mlx5_core_dev *dev) -+{ -+ struct devlink *devlink = priv_to_devlink(dev); -+ union devlink_param_value val; -+ int err; -+ -+ err = devlink_param_driverinit_value_get(devlink, -+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, -+ &val); -+ -+ if (!err) -+ return val.vbool; -+ -+ mlx5_core_dbg(dev, "Failed to get param. 
err = %d\n", err); -+ return MLX5_CAP_GEN(dev, roce); -+} -+EXPORT_SYMBOL(mlx5_is_roce_on); -+ -+static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) -+{ -+ void *set_hca_cap; -+ int err; -+ -+ if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2)) -+ return 0; -+ -+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2); -+ if (err) -+ return err; -+ -+ if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) || -+ !(dev->priv.sw_vhca_id > 0)) -+ return 0; -+ -+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, -+ capability); -+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur, -+ MLX5_ST_SZ_BYTES(cmd_hca_cap_2)); -+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1); -+ -+ return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2); -+} -+ - static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) - { - struct mlx5_profile *prof = &dev->profile; -@@ -577,7 +622,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) - MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix)); - - if (MLX5_CAP_GEN(dev, roce_rw_supported)) -- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev)); -+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce, -+ mlx5_is_roce_on(dev)); - - max_uc_list = max_uc_list_get_devlink_param(dev); - if (max_uc_list > 0) -@@ -603,7 +649,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) - */ - static bool is_roce_fw_disabled(struct mlx5_core_dev *dev) - { -- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) || -+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) || - (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce)); - } - -@@ -669,6 +715,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev) - goto out; - } - -+ memset(set_ctx, 0, set_sz); -+ err = handle_hca_cap_2(dev, set_ctx); -+ if (err) { -+ mlx5_core_err(dev, "handle_hca_cap_2 failed\n"); -+ goto out; -+ } -+ - out: - kfree(set_ctx); - return err; -@@ -1512,6 +1565,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) - if (err) - goto err_hca_caps; - -+ /* The conjunction of sw_vhca_id with sw_owner_id will be a global -+ * unique id per function which uses mlx5_core. -+ * Those values are supplied to FW as part of the init HCA command to -+ * be used by both driver and FW when it's applicable. 
-+ */ -+ dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1, -+ MAX_SW_VHCA_ID, -+ GFP_KERNEL); -+ if (dev->priv.sw_vhca_id < 0) -+ mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n", -+ dev->priv.sw_vhca_id); -+ - return 0; - - err_hca_caps: -@@ -1537,6 +1602,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) - { - struct mlx5_priv *priv = &dev->priv; - -+ if (priv->sw_vhca_id > 0) -+ ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id); -+ - mlx5_hca_caps_free(dev); - mlx5_adev_cleanup(dev); - mlx5_pagealloc_cleanup(dev); -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c -index ac020cb780727..d5c3173250309 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c -@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, - goto free; - - MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1); -- MLX5_SET(modify_nic_vport_context_in, in, -- nic_vport_context.affiliated_vhca_id, -- MLX5_CAP_GEN(master_mdev, vhca_id)); -+ if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) { -+ MLX5_SET(modify_nic_vport_context_in, in, -+ nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW); -+ MLX5_SET(modify_nic_vport_context_in, in, -+ nic_vport_context.affiliated_vhca_id, -+ MLX5_CAP_GEN_2(master_mdev, sw_vhca_id)); -+ } else { -+ MLX5_SET(modify_nic_vport_context_in, in, -+ nic_vport_context.affiliated_vhca_id, -+ MLX5_CAP_GEN(master_mdev, vhca_id)); -+ } - MLX5_SET(modify_nic_vport_context_in, in, - nic_vport_context.affiliation_criteria, - MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria)); -diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c -index 1e1f40f628a02..c69b87d3837da 100644 ---- a/drivers/net/ieee802154/cc2520.c -+++ b/drivers/net/ieee802154/cc2520.c -@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb) - goto err_tx; - - if (status & CC2520_STATUS_TX_UNDERFLOW) { -+ rc = -EINVAL; - dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n"); - goto err_tx; - } -diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c -index 2de09ad5bac03..e11f70911acc1 100644 ---- a/drivers/net/usb/cdc_ether.c -+++ b/drivers/net/usb/cdc_ether.c -@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = { - }, - #endif - -+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */ -+{ -+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM, -+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), -+ .driver_info = 0, -+}, -+ - /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ - { - USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, -diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c -index d142ac8fcf6e2..688905ea0a6d3 100644 ---- a/drivers/net/usb/r8152.c -+++ b/drivers/net/usb/r8152.c -@@ -770,6 +770,7 @@ enum rtl8152_flags { - RX_EPROTO, - }; - -+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054 - #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 - #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c - #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 -@@ -9581,6 +9582,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) - - if (vendor_id == VENDOR_ID_LENOVO) { - switch (product_id) { -+ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK: - case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2: - case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: - case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3: -@@ 
-9828,6 +9830,7 @@ static const struct usb_device_id rtl8152_table[] = { - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927), - REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f), -+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082), -diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c -index 73d9fcba3b1c0..9f6614f7dbeb1 100644 ---- a/drivers/nvme/host/pci.c -+++ b/drivers/nvme/host/pci.c -@@ -3517,6 +3517,8 @@ static const struct pci_device_id nvme_id_table[] = { - .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, - { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ - .driver_data = NVME_QUIRK_BOGUS_NID, }, -+ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ -+ .driver_data = NVME_QUIRK_BOGUS_NID, }, - { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), - .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, - { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), -diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c -index dc3b4dc8fe08b..a3694a32f6d52 100644 ---- a/drivers/nvme/target/tcp.c -+++ b/drivers/nvme/target/tcp.c -@@ -1506,6 +1506,9 @@ static void nvmet_tcp_state_change(struct sock *sk) - goto done; - - switch (sk->sk_state) { -+ case TCP_FIN_WAIT2: -+ case TCP_LAST_ACK: -+ break; - case TCP_FIN_WAIT1: - case TCP_CLOSE_WAIT: - case TCP_CLOSE: -diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c -index 68eb61c65d345..de4a7b3e5966e 100644 ---- a/drivers/peci/cpu.c -+++ b/drivers/peci/cpu.c -@@ -188,8 +188,6 @@ static void adev_release(struct device *dev) - { - struct auxiliary_device *adev = to_auxiliary_dev(dev); - -- auxiliary_device_uninit(adev); -- - kfree(adev->name); - kfree(adev); - } -@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev) - struct auxiliary_device *adev = _adev; - - auxiliary_device_delete(adev); -+ auxiliary_device_uninit(adev); - } - - static int devm_adev_add(struct device *dev, int idx) -diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c -index 513de1f54e2d7..933b96e243b84 100644 ---- a/drivers/perf/arm_pmu_platform.c -+++ b/drivers/perf/arm_pmu_platform.c -@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) - - if (num_irqs == 1) { - int irq = platform_get_irq(pdev, 0); -- if (irq && irq_is_percpu_devid(irq)) -+ if ((irq > 0) && irq_is_percpu_devid(irq)) - return pmu_parse_percpu_irq(pmu, irq); - } - -diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c -index ce2bd88feeaa8..08019c6ccc9ca 100644 ---- a/drivers/platform/surface/surface_aggregator_registry.c -+++ b/drivers/platform/surface/surface_aggregator_registry.c -@@ -556,6 +556,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { - /* Surface Laptop Go 1 */ - { "MSHW0118", (unsigned long)ssam_node_group_slg1 }, - -+ /* Surface Laptop Go 2 */ -+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 }, -+ - /* Surface Laptop Studio */ - { "MSHW0123", (unsigned long)ssam_node_group_sls }, - -diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c -index 9c6943e401a6c..0fbcaffabbfc7 100644 ---- a/drivers/platform/x86/acer-wmi.c -+++ b/drivers/platform/x86/acer-wmi.c -@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { - {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ - {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ - {KE_KEY, 0x24, {KEY_PROG4} }, /* Social 
networking_Key */ -+ {KE_KEY, 0x27, {KEY_HELP} }, - {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ - {KE_IGNORE, 0x41, {KEY_MUTE} }, - {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, -@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { - {KE_IGNORE, 0x48, {KEY_VOLUMEUP} }, - {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} }, - {KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} }, -- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} }, -+ /* -+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event -+ * with the "Video Bus" input device events. But sometimes it is not -+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that -+ * udev/hwdb can override it on systems where it is not a dup. -+ */ -+ {KE_KEY, 0x61, {KEY_UNKNOWN} }, - {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} }, - {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} }, - {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ -diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c -index 62ce198a34631..a0f31624aee97 100644 ---- a/drivers/platform/x86/asus-wmi.c -+++ b/drivers/platform/x86/asus-wmi.c -@@ -107,7 +107,7 @@ module_param(fnlock_default, bool, 0444); - #define WMI_EVENT_MASK 0xFFFF - - #define FAN_CURVE_POINTS 8 --#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2) -+#define FAN_CURVE_BUF_LEN 32 - #define FAN_CURVE_DEV_CPU 0x00 - #define FAN_CURVE_DEV_GPU 0x01 - /* Mask to determine if setting temperature or percentage */ -@@ -2208,8 +2208,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev) - curves = &asus->custom_fan_curves[fan_idx]; - err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf, - FAN_CURVE_BUF_LEN); -- if (err) -+ if (err) { -+ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err); - return err; -+ } - - fan_curve_copy_from_buf(curves, buf); - curves->device_id = fan_dev; -@@ -2227,9 +2229,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available, - - err = fan_curve_get_factory_default(asus, fan_dev); - if (err) { -- pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n", -- fan_dev, err); -- /* Don't cause probe to fail on devices without fan-curves */ - return 0; - } - -diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h -index 4051c8cd0cd8a..23ab3b048d9be 100644 ---- a/drivers/usb/storage/unusual_uas.h -+++ b/drivers/usb/storage/unusual_uas.h -@@ -62,6 +62,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128, - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_IGNORE_UAS), - -+/* Reported-by: Tom Hu */ -+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999, -+ "ASUS", -+ "External HDD", -+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, -+ US_FL_IGNORE_UAS), -+ - /* Reported-by: David Webb */ - UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, - "Seagate", -diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h -index 5fcf89faa31ab..d72626d71258f 100644 ---- a/include/linux/intel-iommu.h -+++ b/include/linux/intel-iommu.h -@@ -196,7 +196,6 @@ - #define ecap_dis(e) (((e) >> 27) & 0x1) - #define ecap_nest(e) (((e) >> 26) & 0x1) - #define ecap_mts(e) (((e) >> 25) & 0x1) --#define ecap_ecs(e) (((e) >> 24) & 0x1) - #define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) - #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) - #define ecap_coherent(e) ((e) & 0x1) -@@ -264,7 +263,6 @@ - #define DMA_GSTS_CFIS (((u32)1) << 23) - - /* DMA_RTADDR_REG */ --#define DMA_RTADDR_RTT (((u64)1) << 11) - #define DMA_RTADDR_SMT (((u64)1) << 10) - - /* CCMD_REG */ -@@ -579,6 +577,7 @@ struct 
intel_iommu { - - #ifdef CONFIG_INTEL_IOMMU - unsigned long *domain_ids; /* bitmap of domains */ -+ unsigned long *copied_tables; /* bitmap of copied tables */ - spinlock_t lock; /* protect context, domain ids */ - struct root_entry *root_entry; /* virtual address */ - -@@ -692,6 +691,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte) - (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte; - } - -+static inline bool context_present(struct context_entry *context) -+{ -+ return (context->lo & 1); -+} -+ - extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); - - extern int dmar_enable_qi(struct intel_iommu *iommu); -@@ -776,7 +780,6 @@ static inline void intel_iommu_debugfs_init(void) {} - #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ - - extern const struct attribute_group *intel_iommu_groups[]; --bool context_present(struct context_entry *context); - struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, - u8 devfn, int alloc); - -diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h -index b0b4ac92354a2..b3ea245faa515 100644 ---- a/include/linux/mlx5/driver.h -+++ b/include/linux/mlx5/driver.h -@@ -606,6 +606,7 @@ struct mlx5_priv { - spinlock_t ctx_lock; - struct mlx5_adev **adev; - int adev_idx; -+ int sw_vhca_id; - struct mlx5_events *events; - - struct mlx5_flow_steering *steering; -@@ -1274,16 +1275,17 @@ enum { - MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, - }; - --static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev) -+bool mlx5_is_roce_on(struct mlx5_core_dev *dev); -+ -+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev) - { -- struct devlink *devlink = priv_to_devlink(dev); -- union devlink_param_value val; -- int err; -- -- err = devlink_param_driverinit_value_get(devlink, -- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, -- &val); -- return err ? 
MLX5_CAP_GEN(dev, roce) : val.vbool; -+ if (MLX5_CAP_GEN(dev, roce_rw_supported)) -+ return MLX5_CAP_GEN(dev, roce); -+ -+ /* If RoCE cap is read-only in FW, get RoCE state from devlink -+ * in order to support RoCE enable/disable feature -+ */ -+ return mlx5_is_roce_on(dev); - } - - #endif /* MLX5_DRIVER_H */ -diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h -index fd7d083a34d33..6d57e5ec9718d 100644 ---- a/include/linux/mlx5/mlx5_ifc.h -+++ b/include/linux/mlx5/mlx5_ifc.h -@@ -1804,7 +1804,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { - u8 max_reformat_remove_size[0x8]; - u8 max_reformat_remove_offset[0x8]; - -- u8 reserved_at_c0[0x740]; -+ u8 reserved_at_c0[0x160]; -+ -+ u8 reserved_at_220[0x1]; -+ u8 sw_vhca_id_valid[0x1]; -+ u8 sw_vhca_id[0xe]; -+ u8 reserved_at_230[0x10]; -+ -+ u8 reserved_at_240[0x5c0]; - }; - - enum mlx5_ifc_flow_destination_type { -@@ -3715,6 +3722,11 @@ struct mlx5_ifc_rmpc_bits { - struct mlx5_ifc_wq_bits wq; - }; - -+enum { -+ VHCA_ID_TYPE_HW = 0, -+ VHCA_ID_TYPE_SW = 1, -+}; -+ - struct mlx5_ifc_nic_vport_context_bits { - u8 reserved_at_0[0x5]; - u8 min_wqe_inline_mode[0x3]; -@@ -3731,8 +3743,8 @@ struct mlx5_ifc_nic_vport_context_bits { - u8 event_on_mc_address_change[0x1]; - u8 event_on_uc_address_change[0x1]; - -- u8 reserved_at_40[0xc]; -- -+ u8 vhca_id_type[0x1]; -+ u8 reserved_at_41[0xb]; - u8 affiliation_criteria[0x4]; - u8 affiliated_vhca_id[0x10]; - -@@ -7189,7 +7201,12 @@ struct mlx5_ifc_init_hca_in_bits { - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - -- u8 reserved_at_40[0x40]; -+ u8 reserved_at_40[0x20]; -+ -+ u8 reserved_at_60[0x2]; -+ u8 sw_vhca_id[0xe]; -+ u8 reserved_at_70[0x10]; -+ - u8 sw_owner_id[4][0x20]; - }; - -diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c -index cbdf0e2bc5ae0..d0fb74b0db1d5 100644 ---- a/net/bluetooth/mgmt.c -+++ b/net/bluetooth/mgmt.c -@@ -4420,6 +4420,22 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, - MGMT_STATUS_NOT_SUPPORTED); - } - -+static u32 get_params_flags(struct hci_dev *hdev, -+ struct hci_conn_params *params) -+{ -+ u32 flags = hdev->conn_flags; -+ -+ /* Devices using RPAs can only be programmed in the acceptlist if -+ * LL Privacy has been enable otherwise they cannot mark -+ * HCI_CONN_FLAG_REMOTE_WAKEUP. -+ */ -+ if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) && -+ hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) -+ flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP; -+ -+ return flags; -+} -+ - static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, - u16 data_len) - { -@@ -4451,10 +4467,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, - } else { - params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, - le_addr_type(cp->addr.type)); -- - if (!params) - goto done; - -+ supported_flags = get_params_flags(hdev, params); - current_flags = params->flags; - } - -@@ -4523,38 +4539,35 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, - bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", - &cp->addr.bdaddr, cp->addr.type); - } -- } else { -- params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, -- le_addr_type(cp->addr.type)); -- if (params) { -- /* Devices using RPAs can only be programmed in the -- * acceptlist LL Privacy has been enable otherwise they -- * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP. 
-- */ -- if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && -- !use_ll_privacy(hdev) && -- hci_find_irk_by_addr(hdev, ¶ms->addr, -- params->addr_type)) { -- bt_dev_warn(hdev, -- "Cannot set wakeable for RPA"); -- goto unlock; -- } - -- params->flags = current_flags; -- status = MGMT_STATUS_SUCCESS; -+ goto unlock; -+ } - -- /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY -- * has been set. -- */ -- if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) -- hci_update_passive_scan(hdev); -- } else { -- bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", -- &cp->addr.bdaddr, -- le_addr_type(cp->addr.type)); -- } -+ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, -+ le_addr_type(cp->addr.type)); -+ if (!params) { -+ bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", -+ &cp->addr.bdaddr, le_addr_type(cp->addr.type)); -+ goto unlock; -+ } -+ -+ supported_flags = get_params_flags(hdev, params); -+ -+ if ((supported_flags | current_flags) != supported_flags) { -+ bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", -+ current_flags, supported_flags); -+ goto unlock; - } - -+ params->flags = current_flags; -+ status = MGMT_STATUS_SUCCESS; -+ -+ /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY -+ * has been set. -+ */ -+ if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) -+ hci_update_passive_scan(hdev); -+ - unlock: - hci_dev_unlock(hdev); - -diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c -index eb204ad36eeec..846588c0070a5 100644 ---- a/net/dsa/tag_hellcreek.c -+++ b/net/dsa/tag_hellcreek.c -@@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb, - - skb->dev = dsa_master_find_slave(dev, 0, port); - if (!skb->dev) { -- netdev_warn(dev, "Failed to get source port: %d\n", port); -+ netdev_warn_once(dev, "Failed to get source port: %d\n", port); - return NULL; - } - diff --git a/sys-kernel/pinephone-sources/files/5010_enable-cpu-optimizations-universal.patch b/sys-kernel/pinephone-sources/files/5010_enable-cpu-optimizations-universal.patch index b9c03cb..0841340 100644 --- a/sys-kernel/pinephone-sources/files/5010_enable-cpu-optimizations-universal.patch +++ b/sys-kernel/pinephone-sources/files/5010_enable-cpu-optimizations-universal.patch @@ -1,10 +1,7 @@ -From b5892719c43f739343c628e3d357471a3bdaa368 Mon Sep 17 00:00:00 2001 -From: graysky -Date: Tue, 15 Mar 2022 05:58:43 -0400 +From a0825feea3f100656d58446885b5f190284fd219 +From: graysky +Date: Fri, 4 Nov 2022 15:34:36 -0400 Subject: [PATCH] more uarches for kernel 5.17+ -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit FEATURES This patch adds additional CPU options to the Linux kernel accessible under: @@ -36,6 +33,7 @@ CPU-specific microarchitectures include: • AMD Family 17h (Zen) • AMD Family 17h (Zen 2) • AMD Family 19h (Zen 3)† +• AMD Family 19h (Zen 4)§ • Intel Silvermont low-power processors • Intel Goldmont low-power processors (Apollo Lake and Denverton) • Intel Goldmont Plus low-power processors (Gemini Lake) @@ -55,11 +53,14 @@ CPU-specific microarchitectures include: • Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡ • Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡ • Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡ +• Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)§ +• Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)§ Notes: If not otherwise noted, gcc >=9.1 is required for support. 
*Requires gcc >=10.1 or clang >=10.0 †Required gcc >=10.3 or clang >=12.0 ‡Required gcc >=11.1 or clang >=12.0 + §Required gcc >=13.0 or clang >=15.0.5 It also offers to compile passing the 'native' option which, "selects the CPU to generate code for at compilation time by determining the processor type of @@ -99,20 +100,19 @@ REFERENCES 4. https://github.com/graysky2/kernel_gcc_patch/issues/15 5. http://www.linuxforge.net/docs/linux/linux-gcc.php -Signed-off-by: graysky --- - arch/x86/Kconfig.cpu | 332 ++++++++++++++++++++++++++++++-- - arch/x86/Makefile | 40 +++- - arch/x86/include/asm/vermagic.h | 66 +++++++ - 3 files changed, 424 insertions(+), 14 deletions(-) + arch/x86/Kconfig.cpu | 416 ++++++++++++++++++++++++++++++-- + arch/x86/Makefile | 43 +++- + arch/x86/include/asm/vermagic.h | 72 ++++++ + 3 files changed, 514 insertions(+), 17 deletions(-) diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu -index 542377cd419d..22b919cdb6d1 100644 +index 542377cd419d..08d887d1220d 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -157,7 +157,7 @@ config MPENTIUM4 - - + + config MK6 - bool "K6/K6-II/K6-III" + bool "AMD K6/K6-II/K6-III" @@ -121,16 +121,16 @@ index 542377cd419d..22b919cdb6d1 100644 Select this for an AMD K6-family processor. Enables use of @@ -165,7 +165,7 @@ config MK6 flags to GCC. - + config MK7 - bool "Athlon/Duron/K7" + bool "AMD Athlon/Duron/K7" depends on X86_32 help Select this for an AMD Athlon K7-family processor. Enables use of -@@ -173,12 +173,98 @@ config MK7 +@@ -173,12 +173,106 @@ config MK7 flags to GCC. - + config MK8 - bool "Opteron/Athlon64/Hammer/K8" + bool "AMD Opteron/Athlon64/Hammer/K8" @@ -138,7 +138,7 @@ index 542377cd419d..22b919cdb6d1 100644 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables use of some extended instructions, and passes appropriate optimization flags to GCC. - + +config MK8SSE3 + bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3" + help @@ -224,32 +224,40 @@ index 542377cd419d..22b919cdb6d1 100644 + Select this for AMD Family 19h Zen 3 processors. + + Enables -march=znver3 ++ ++config MZEN4 ++ bool "AMD Zen 4" ++ depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500) ++ help ++ Select this for AMD Family 19h Zen 4 processors. ++ ++ Enables -march=znver4 + config MCRUSOE bool "Crusoe" depends on X86_32 -@@ -270,7 +356,7 @@ config MPSC +@@ -270,7 +364,7 @@ config MPSC in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one. - + config MCORE2 - bool "Core 2/newer Xeon" + bool "Intel Core 2" help - + Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and -@@ -278,6 +364,8 @@ config MCORE2 +@@ -278,6 +372,8 @@ config MCORE2 family in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo) - + + Enables -march=core2 + config MATOM bool "Intel Atom" help -@@ -287,6 +375,182 @@ config MATOM +@@ -287,6 +383,202 @@ config MATOM accordingly optimized code. Use a recent GCC with specific Atom support in order to fully benefit from selecting this option. - + +config MNEHALEM + bool "Intel Nehalem" + select X86_P6_NOP @@ -425,14 +433,34 @@ index 542377cd419d..22b919cdb6d1 100644 + Select this for twelfth-generation processors in the Alder Lake family. + + Enables -march=alderlake ++ ++config MRAPTORLAKE ++ bool "Intel Raptor Lake" ++ depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500) ++ select X86_P6_NOP ++ help ++ ++ Select this for thirteenth-generation processors in the Raptor Lake family. 
++ ++ Enables -march=raptorlake ++ ++config MMETEORLAKE ++ bool "Intel Meteor Lake" ++ depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500) ++ select X86_P6_NOP ++ help ++ ++ Select this for fourteenth-generation processors in the Meteor Lake family. ++ ++ Enables -march=meteorlake + config GENERIC_CPU bool "Generic-x86-64" depends on X86_64 -@@ -294,6 +558,50 @@ config GENERIC_CPU +@@ -294,6 +586,50 @@ config GENERIC_CPU Generic x86-64 CPU. Run equally well on all x86-64 CPUs. - + +config GENERIC_CPU2 + bool "Generic-x86-64-v2" + depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) @@ -478,68 +506,133 @@ index 542377cd419d..22b919cdb6d1 100644 + Enables -march=native + endchoice - + config X86_GENERIC -@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT +@@ -318,9 +654,17 @@ config X86_INTERNODE_CACHE_SHIFT config X86_L1_CACHE_SHIFT int default "7" if MPENTIUM4 || MPSC - default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU -+ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4 ++ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 \ ++ || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \ ++ || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \ ++ || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \ ++ || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \ ++ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \ ++ || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 \ ++ || GENERIC_CPU4 default "4" if MELAN || M486SX || M486 || MGEODEGX1 - default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX - -@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16 - +- default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX ++ default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \ ++ || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX + + config X86_F00F_BUG + def_bool y +@@ -332,15 +676,27 @@ config X86_INVD_BUG + + config X86_ALIGNMENT_16 + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1 ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \ ++ || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1 + config X86_INTEL_USERCOPY def_bool y - depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 -+ depends on 
MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL - ++ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC \ ++ || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \ ++ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \ ++ || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \ ++ || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL + config X86_USE_PPRO_CHECKSUM def_bool y - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM -+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD - ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \ ++ || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX \ ++ || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \ ++ || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \ ++ || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE \ ++ || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \ ++ || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \ ++ || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD + # # P6_NOPs are a relatively minor optimization that require a family >= -@@ -356,26 +664,26 @@ config X86_USE_PPRO_CHECKSUM +@@ -356,32 +712,62 @@ config X86_USE_PPRO_CHECKSUM config X86_P6_NOP def_bool y depends on X86_64 - depends on (MCORE2 || MPENTIUM4 || MPSC) -+ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL) - ++ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \ ++ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE \ ++ || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \ ++ || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL) + config X86_TSC def_bool y - depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII 
|| MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64 -+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64 - ++ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM \ ++ || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 \ ++ || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER \ ++ || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \ ++ || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL \ ++ || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \ ++ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL \ ++ || MNATIVE_AMD) || X86_64 + config X86_CMPXCHG64 def_bool y - depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 -+ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD - ++ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \ ++ || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 \ ++ || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN \ ++ || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \ ++ || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \ ++ || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \ ++ || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD + # this should be set for all -march=.. options where the compiler # generates cmov. 
config X86_CMOV def_bool y - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) -+ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) - ++ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \ ++ || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 \ ++ || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR \ ++ || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \ ++ || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \ ++ || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \ ++ || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD) + config X86_MINIMUM_CPU_FAMILY int default "64" if X86_64 - default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8) -+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) ++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 \ ++ || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 \ ++ || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER \ ++ || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT \ ++ || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \ ++ || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \ ++ || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MRAPTORLAKE \ ++ || MNATIVE_INTEL || MNATIVE_AMD) default "5" if X86_32 && X86_CMPXCHG64 default "4" - + + config X86_DEBUGCTLMSR + def_bool y +- depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML ++ depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \ ++ || M486SX || M486) && !UML + + config IA32_FEAT_CTL + def_bool y diff --git a/arch/x86/Makefile b/arch/x86/Makefile -index e84cdd409b64..7d3bbf060079 100644 +index bafbd905e6e7..7fae52788560 100644 --- 
a/arch/x86/Makefile +++ b/arch/x86/Makefile -@@ -131,8 +131,44 @@ else +@@ -150,8 +150,47 @@ else # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) cflags-$(CONFIG_MK8) += -march=k8 cflags-$(CONFIG_MPSC) += -march=nocona @@ -557,6 +650,7 @@ index e84cdd409b64..7d3bbf060079 100644 + cflags-$(CONFIG_MZEN) += -march=znver1 + cflags-$(CONFIG_MZEN2) += -march=znver2 + cflags-$(CONFIG_MZEN3) += -march=znver3 ++ cflags-$(CONFIG_MZEN4) += -march=znver4 + cflags-$(CONFIG_MNATIVE_INTEL) += -march=native + cflags-$(CONFIG_MNATIVE_AMD) += -march=native + cflags-$(CONFIG_MATOM) += -march=bonnell @@ -580,17 +674,19 @@ index e84cdd409b64..7d3bbf060079 100644 + cflags-$(CONFIG_MSAPPHIRERAPIDS) += -march=sapphirerapids + cflags-$(CONFIG_MROCKETLAKE) += -march=rocketlake + cflags-$(CONFIG_MALDERLAKE) += -march=alderlake ++ cflags-$(CONFIG_MRAPTORLAKE) += -march=raptorlake ++ cflags-$(CONFIG_MMETEORLAKE) += -march=meteorlake + cflags-$(CONFIG_GENERIC_CPU2) += -march=x86-64-v2 + cflags-$(CONFIG_GENERIC_CPU3) += -march=x86-64-v3 + cflags-$(CONFIG_GENERIC_CPU4) += -march=x86-64-v4 cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic KBUILD_CFLAGS += $(cflags-y) - + diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h -index 75884d2cdec3..4e6a08d4c7e5 100644 +index 75884d2cdec3..18021e8c0c28 100644 --- a/arch/x86/include/asm/vermagic.h +++ b/arch/x86/include/asm/vermagic.h -@@ -17,6 +17,48 @@ +@@ -17,6 +17,52 @@ #define MODULE_PROC_FAMILY "586MMX " #elif defined CONFIG_MCORE2 #define MODULE_PROC_FAMILY "CORE2 " @@ -636,10 +732,14 @@ index 75884d2cdec3..4e6a08d4c7e5 100644 +#define MODULE_PROC_FAMILY "ROCKETLAKE " +#elif defined CONFIG_MALDERLAKE +#define MODULE_PROC_FAMILY "ALDERLAKE " ++#elif defined CONFIG_MRAPTORLAKE ++#define MODULE_PROC_FAMILY "RAPTORLAKE " ++#elif defined CONFIG_MMETEORLAKE ++#define MODULE_PROC_FAMILY "METEORLAKE " #elif defined CONFIG_MATOM #define MODULE_PROC_FAMILY "ATOM " #elif defined CONFIG_M686 -@@ -35,6 +77,30 @@ +@@ -35,6 +81,32 @@ #define MODULE_PROC_FAMILY "K7 " #elif defined CONFIG_MK8 #define MODULE_PROC_FAMILY "K8 " @@ -667,9 +767,10 @@ index 75884d2cdec3..4e6a08d4c7e5 100644 +#define MODULE_PROC_FAMILY "ZEN2 " +#elif defined CONFIG_MZEN3 +#define MODULE_PROC_FAMILY "ZEN3 " ++#elif defined CONFIG_MZEN4 ++#define MODULE_PROC_FAMILY "ZEN4 " #elif defined CONFIG_MELAN #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE --- -2.35.1 - +-- +2.38.1 diff --git a/sys-kernel/pinephone-sources/files/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch b/sys-kernel/pinephone-sources/files/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch similarity index 95% rename from sys-kernel/pinephone-sources/files/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch rename to sys-kernel/pinephone-sources/files/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch index 610cfe8..783f3bc 100644 --- a/sys-kernel/pinephone-sources/files/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch +++ b/sys-kernel/pinephone-sources/files/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch @@ -1,8 +1,8 @@ diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index cc3ea8febc62..ab4c5a35b999 100644 +index 42af9ca0127e..31747ec54f9d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -5299,6 +5299,12 @@ +@@ -5406,6 +5406,12 @@ sa1100ir [NET] See drivers/net/irda/sa1100_ir.c. 
@@ -16,10 +16,10 @@ index cc3ea8febc62..ab4c5a35b999 100644 schedstats= [KNL,X86] Enable or disable scheduled statistics. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst -index ddccd1077462..e24781970a3d 100644 +index 98d1b198b2b4..d7c78a107f93 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst -@@ -1524,3 +1524,13 @@ is 10 seconds. +@@ -1552,3 +1552,13 @@ is 10 seconds. The softlockup threshold is (``2 * watchdog_thresh``). Setting this tunable to zero will disable lockup detection altogether. @@ -150,7 +150,7 @@ index 000000000000..05c84eec0f31 +priority boost from unblocking while background threads that do most of the +processing receive the priority penalty for using their entire timeslice. diff --git a/fs/proc/base.c b/fs/proc/base.c -index 8dfa36a99c74..46397c606e01 100644 +index 9e479d7d202b..2a8530021b23 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns, @@ -176,10 +176,10 @@ index 8874f681b056..59eb72bf7d5f 100644 [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ } diff --git a/include/linux/sched.h b/include/linux/sched.h -index c46f3a63b758..7c65e6317d97 100644 +index ffb6eb55cd13..2e730a59caa2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -751,8 +751,14 @@ struct task_struct { +@@ -762,8 +762,14 @@ struct task_struct { unsigned int ptrace; #ifdef CONFIG_SMP @@ -195,7 +195,7 @@ index c46f3a63b758..7c65e6317d97 100644 unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; -@@ -766,6 +772,7 @@ struct task_struct { +@@ -777,6 +783,7 @@ struct task_struct { */ int recent_used_cpu; int wake_cpu; @@ -203,7 +203,7 @@ index c46f3a63b758..7c65e6317d97 100644 #endif int on_rq; -@@ -774,6 +781,20 @@ struct task_struct { +@@ -785,6 +792,20 @@ struct task_struct { int normal_prio; unsigned int rt_priority; @@ -224,7 +224,7 @@ index c46f3a63b758..7c65e6317d97 100644 struct sched_entity se; struct sched_rt_entity rt; struct sched_dl_entity dl; -@@ -784,6 +805,7 @@ struct task_struct { +@@ -795,6 +816,7 @@ struct task_struct { unsigned long core_cookie; unsigned int core_occupation; #endif @@ -232,7 +232,7 @@ index c46f3a63b758..7c65e6317d97 100644 #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; -@@ -1517,6 +1539,15 @@ struct task_struct { +@@ -1545,6 +1567,15 @@ struct task_struct { */ }; @@ -323,7 +323,7 @@ index ab83d85e1183..6af9ae681116 100644 * Convert user-nice values [ -20 ... 0 ... 
19 ] * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h -index e5af028c08b4..0a7565d0d3cf 100644 +index 994c25640e15..8c050a59ece1 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk) @@ -338,10 +338,10 @@ index e5af028c08b4..0a7565d0d3cf 100644 } diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h -index 56cffe42abbc..e020fc572b22 100644 +index 816df6cc444e..c8da08e18c91 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h -@@ -233,7 +233,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) +@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) #endif /* !CONFIG_SMP */ @@ -352,10 +352,10 @@ index 56cffe42abbc..e020fc572b22 100644 #else static inline void rebuild_sched_domains_energy(void) diff --git a/init/Kconfig b/init/Kconfig -index c7900e8975f1..d2b593e3807d 100644 +index 94125d3b6893..c87ba766d354 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -812,6 +812,7 @@ menu "Scheduler features" +@@ -819,6 +819,7 @@ menu "Scheduler features" config UCLAMP_TASK bool "Enable utilization clamping for RT/FAIR tasks" depends on CPU_FREQ_GOV_SCHEDUTIL @@ -363,13 +363,13 @@ index c7900e8975f1..d2b593e3807d 100644 help This feature enables the scheduler to track the clamped utilization of each CPU based on RUNNABLE tasks scheduled on that CPU. -@@ -858,6 +859,35 @@ config UCLAMP_BUCKETS_COUNT +@@ -865,6 +866,35 @@ config UCLAMP_BUCKETS_COUNT If in doubt, use the default value. +menuconfig SCHED_ALT + bool "Alternative CPU Schedulers" -+ default y ++ default n + help + This feature enable alternative CPU scheduler" + @@ -399,7 +399,7 @@ index c7900e8975f1..d2b593e3807d 100644 endmenu # -@@ -911,6 +941,7 @@ config NUMA_BALANCING +@@ -918,6 +948,7 @@ config NUMA_BALANCING depends on ARCH_SUPPORTS_NUMA_BALANCING depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY depends on SMP && NUMA && MIGRATION && !PREEMPT_RT @@ -407,7 +407,7 @@ index c7900e8975f1..d2b593e3807d 100644 help This option adds support for automatic NUMA aware memory/task placement. The mechanism is quite primitive and is based on migrating memory when -@@ -1003,6 +1034,7 @@ config FAIR_GROUP_SCHED +@@ -1015,6 +1046,7 @@ config FAIR_GROUP_SCHED depends on CGROUP_SCHED default CGROUP_SCHED @@ -415,7 +415,7 @@ index c7900e8975f1..d2b593e3807d 100644 config CFS_BANDWIDTH bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED" depends on FAIR_GROUP_SCHED -@@ -1025,6 +1057,7 @@ config RT_GROUP_SCHED +@@ -1037,6 +1069,7 @@ config RT_GROUP_SCHED realtime bandwidth for them. See Documentation/scheduler/sched-rt-group.rst for more information. @@ -423,7 +423,7 @@ index c7900e8975f1..d2b593e3807d 100644 endif #CGROUP_SCHED config UCLAMP_TASK_GROUP -@@ -1268,6 +1301,7 @@ config CHECKPOINT_RESTORE +@@ -1281,6 +1314,7 @@ config CHECKPOINT_RESTORE config SCHED_AUTOGROUP bool "Automatic process group scheduling" @@ -432,7 +432,7 @@ index c7900e8975f1..d2b593e3807d 100644 select CGROUP_SCHED select FAIR_GROUP_SCHED diff --git a/init/init_task.c b/init/init_task.c -index 73cc8f03511a..2d0bad762895 100644 +index ff6c4b9bfe6b..19e9c662d1a1 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -75,9 +75,15 @@ struct task_struct init_task @@ -491,10 +491,10 @@ index c2f1fd95a821..41654679b1b2 100644 This option permits Core Scheduling, a means of coordinated task selection across SMT siblings. 
When enabled -- see diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c -index 71a418858a5e..7e3016873db1 100644 +index b474289c15b8..a23224b45b03 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c -@@ -704,7 +704,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) +@@ -787,7 +787,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) return ret; } @@ -503,7 +503,7 @@ index 71a418858a5e..7e3016873db1 100644 /* * Helper routine for generate_sched_domains(). * Do cpusets a, b have overlapping effective cpus_allowed masks? -@@ -1100,7 +1100,7 @@ static void rebuild_sched_domains_locked(void) +@@ -1183,7 +1183,7 @@ static void rebuild_sched_domains_locked(void) /* Have scheduler rebuild the domains */ partition_and_rebuild_sched_domains(ndoms, doms, attr); } @@ -513,7 +513,7 @@ index 71a418858a5e..7e3016873db1 100644 { } diff --git a/kernel/delayacct.c b/kernel/delayacct.c -index 164ed9ef77a3..c974a84b056f 100644 +index e39cb696cfbd..463423572e09 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) @@ -526,10 +526,10 @@ index 164ed9ef77a3..c974a84b056f 100644 d->cpu_count += t1; diff --git a/kernel/exit.c b/kernel/exit.c -index 64c938ce36fe..a353f7ef5392 100644 +index 35e0a31a0315..64e368441cf4 100644 --- a/kernel/exit.c +++ b/kernel/exit.c -@@ -124,7 +124,7 @@ static void __exit_signal(struct task_struct *tsk) +@@ -125,7 +125,7 @@ static void __exit_signal(struct task_struct *tsk) sig->curr_target = next_thread(tsk); } @@ -538,7 +538,7 @@ index 64c938ce36fe..a353f7ef5392 100644 sizeof(unsigned long long)); /* -@@ -145,7 +145,7 @@ static void __exit_signal(struct task_struct *tsk) +@@ -146,7 +146,7 @@ static void __exit_signal(struct task_struct *tsk) sig->inblock += task_io_get_inblock(tsk); sig->oublock += task_io_get_oublock(tsk); task_io_accounting_add(&sig->ioac, &tsk->ioac); @@ -632,10 +632,10 @@ index 976092b7bd45..31d587c16ec1 100644 obj-y += build_utility.o diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c new file mode 100644 -index 000000000000..d0ab41c4d9ad +index 000000000000..4bea0c025475 --- /dev/null +++ b/kernel/sched/alt_core.c -@@ -0,0 +1,7807 @@ +@@ -0,0 +1,7912 @@ +/* + * kernel/sched/alt_core.c + * @@ -671,6 +671,7 @@ index 000000000000..d0ab41c4d9ad + +#include + ++#include +#include + +#define CREATE_TRACE_POINTS @@ -681,7 +682,7 @@ index 000000000000..d0ab41c4d9ad + +#include "pelt.h" + -+#include "../../fs/io-wq.h" ++#include "../../io_uring/io-wq.h" +#include "../smpboot.h" + +/* @@ -705,7 +706,7 @@ index 000000000000..d0ab41c4d9ad +#define sched_feat(x) (0) +#endif /* CONFIG_SCHED_DEBUG */ + -+#define ALT_SCHED_VERSION "v5.19-r0" ++#define ALT_SCHED_VERSION "v6.1-r0" + +/* rt_prio(prio) defined in include/linux/sched/rt.h */ +#define rt_task(p) rt_prio((p)->prio) @@ -927,8 +928,7 @@ index 000000000000..d0ab41c4d9ad + * p->se.load, p->rt_priority, + * p->dl.dl_{runtime, deadline, period, flags, bw, density} + * - sched_setnuma(): p->numa_preferred_nid -+ * - sched_move_task()/ -+ * cpu_cgroup_fork(): p->sched_task_group ++ * - sched_move_task(): p->sched_task_group + * - uclamp_update_active() p->uclamp* + * + * p->state <- TASK_*: @@ -1189,6 +1189,7 @@ index 000000000000..d0ab41c4d9ad + + rq->prev_irq_time += irq_delta; + delta -= irq_delta; ++ psi_account_irqtime(rq->curr, irq_delta); +#endif +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + if (static_key_false((¶virt_steal_rq_enabled))) { @@ -1267,9 
+1268,9 @@ index 000000000000..d0ab41c4d9ad +} + +#ifdef CONFIG_SMP -+unsigned long sched_cpu_util(int cpu, unsigned long max) ++unsigned long sched_cpu_util(int cpu) +{ -+ return rq_load_util(cpu_rq(cpu), max); ++ return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu)); +} +#endif /* CONFIG_SMP */ + @@ -1446,15 +1447,11 @@ index 000000000000..d0ab41c4d9ad + ({ \ + typeof(ptr) _ptr = (ptr); \ + typeof(mask) _mask = (mask); \ -+ typeof(*_ptr) _old, _val = *_ptr; \ ++ typeof(*_ptr) _val = *_ptr; \ + \ -+ for (;;) { \ -+ _old = cmpxchg(_ptr, _val, _val | _mask); \ -+ if (_old == _val) \ -+ break; \ -+ _val = _old; \ -+ } \ -+ _old; \ ++ do { \ ++ } while (!try_cmpxchg(_ptr, &_val, _val | _mask)); \ ++ _val; \ +}) + +#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) @@ -1463,7 +1460,7 @@ index 000000000000..d0ab41c4d9ad + * this avoids any races wrt polling state changes and thereby avoids + * spurious IPIs. + */ -+static bool set_nr_and_not_polling(struct task_struct *p) ++static inline bool set_nr_and_not_polling(struct task_struct *p) +{ + struct thread_info *ti = task_thread_info(p); + return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); @@ -1478,30 +1475,28 @@ index 000000000000..d0ab41c4d9ad +static bool set_nr_if_polling(struct task_struct *p) +{ + struct thread_info *ti = task_thread_info(p); -+ typeof(ti->flags) old, val = READ_ONCE(ti->flags); ++ typeof(ti->flags) val = READ_ONCE(ti->flags); + + for (;;) { + if (!(val & _TIF_POLLING_NRFLAG)) + return false; + if (val & _TIF_NEED_RESCHED) + return true; -+ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); -+ if (old == val) ++ if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED)) + break; -+ val = old; + } + return true; +} + +#else -+static bool set_nr_and_not_polling(struct task_struct *p) ++static inline bool set_nr_and_not_polling(struct task_struct *p) +{ + set_tsk_need_resched(p); + return true; +} + +#ifdef CONFIG_SMP -+static bool set_nr_if_polling(struct task_struct *p) ++static inline bool set_nr_if_polling(struct task_struct *p) +{ + return false; +} @@ -2175,7 +2170,7 @@ index 000000000000..d0ab41c4d9ad + rq = cpu_rq(new_cpu); + + raw_spin_lock(&rq->lock); -+ BUG_ON(task_cpu(p) != new_cpu); ++ WARN_ON_ONCE(task_cpu(p) != new_cpu); + sched_task_sanity_check(p, rq); + enqueue_task(p, rq, 0); + p->on_rq = TASK_ON_RQ_QUEUED; @@ -2312,12 +2307,12 @@ index 000000000000..d0ab41c4d9ad +/* + * wait_task_inactive - wait for a thread to unschedule. + * -+ * If @match_state is nonzero, it's the @p->state value just checked and -+ * not expected to change. If it changes, i.e. @p might have woken up, -+ * then return zero. When we succeed in waiting for @p to be off its CPU, -+ * we return a positive number (its total switch count). If a second call -+ * a short while later returns the same number, the caller can be sure that -+ * @p has remained unscheduled the whole time. ++ * Wait for the thread to block in any of the states set in @match_state. ++ * If it changes, i.e. @p might have woken up, then return zero. When we ++ * succeed in waiting for @p to be off its CPU, we return a positive number ++ * (its total switch count). If a second call a short while later returns the ++ * same number, the caller can be sure that @p has remained unscheduled the ++ * whole time. + * + * The caller must ensure that the task *will* unschedule sometime soon, + * else this function might spin for a *long* time. 
This function can't @@ -2347,8 +2342,8 @@ index 000000000000..d0ab41c4d9ad + * if the runqueue has changed and p is actually now + * running somewhere else! + */ -+ while (task_running(p) && p == rq->curr) { -+ if (match_state && unlikely(READ_ONCE(p->__state) != match_state)) ++ while (task_on_cpu(p) && p == rq->curr) { ++ if (!(READ_ONCE(p->__state) & match_state)) + return 0; + cpu_relax(); + } @@ -2360,10 +2355,10 @@ index 000000000000..d0ab41c4d9ad + */ + task_access_lock_irqsave(p, &lock, &flags); + trace_sched_wait_task(p); -+ running = task_running(p); ++ running = task_on_cpu(p); + on_rq = p->on_rq; + ncsw = 0; -+ if (!match_state || READ_ONCE(p->__state) == match_state) ++ if (READ_ONCE(p->__state) & match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_access_unlock_irqrestore(p, lock, &flags); + @@ -2607,7 +2602,7 @@ index 000000000000..d0ab41c4d9ad + rq->nr_pinned--; + } + -+ if (task_running(p) || READ_ONCE(p->__state) == TASK_WAKING) { ++ if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) { + struct migration_arg arg = { p, dest_cpu }; + + /* Need help from migration thread: drop lock and wait. */ @@ -3033,7 +3028,7 @@ index 000000000000..d0ab41c4d9ad + __smp_call_single_queue(cpu, &p->wake_entry.llist); +} + -+static inline bool ttwu_queue_cond(int cpu, int wake_flags) ++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) +{ + /* + * Do not complicate things with the async wake_list while the CPU is @@ -3042,6 +3037,10 @@ index 000000000000..d0ab41c4d9ad + if (!cpu_active(cpu)) + return false; + ++ /* Ensure the task will still be allowed to run on the CPU. */ ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) ++ return false; ++ + /* + * If the CPU does not share cache, then queue the task on the + * remote rqs wakelist to avoid accessing remote data. @@ -3049,13 +3048,21 @@ index 000000000000..d0ab41c4d9ad + if (!cpus_share_cache(smp_processor_id(), cpu)) + return true; + ++ if (cpu == smp_processor_id()) ++ return false; ++ + /* -+ * If the task is descheduling and the only running task on the -+ * CPU then use the wakelist to offload the task activation to -+ * the soon-to-be-idle CPU as the current CPU is likely busy. -+ * nr_running is checked to avoid unnecessary task stacking. ++ * If the wakee cpu is idle, or the task is descheduling and the ++ * only running task on the CPU, then use the wakelist to offload ++ * the task activation to the idle (or soon-to-be-idle) CPU as ++ * the current CPU is likely busy. nr_running is checked to ++ * avoid unnecessary task stacking. ++ * ++ * Note that we can only get here with (wakee) p->on_rq=0, ++ * p->on_cpu can be whatever, we've done the dequeue, so ++ * the wakee has been accounted out of ->nr_running. + */ -+ if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1) ++ if (!cpu_rq(cpu)->nr_running) + return true; + + return false; @@ -3063,10 +3070,7 @@ index 000000000000..d0ab41c4d9ad + +static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) +{ -+ if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { -+ if (WARN_ON_ONCE(cpu == smp_processor_id())) -+ return false; -+ ++ if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { + sched_clock_cpu(cpu); /* Sync clocks across CPUs */ + __ttwu_queue_wakelist(p, cpu, wake_flags); + return true; @@ -3422,7 +3426,7 @@ index 000000000000..d0ab41c4d9ad + * scheduling. 
+ */ + if (smp_load_acquire(&p->on_cpu) && -+ ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU)) ++ ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) + goto unlock; + + /* @@ -3465,6 +3469,40 @@ index 000000000000..d0ab41c4d9ad + return success; +} + ++static bool __task_needs_rq_lock(struct task_struct *p) ++{ ++ unsigned int state = READ_ONCE(p->__state); ++ ++ /* ++ * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when ++ * the task is blocked. Make sure to check @state since ttwu() can drop ++ * locks at the end, see ttwu_queue_wakelist(). ++ */ ++ if (state == TASK_RUNNING || state == TASK_WAKING) ++ return true; ++ ++ /* ++ * Ensure we load p->on_rq after p->__state, otherwise it would be ++ * possible to, falsely, observe p->on_rq == 0. ++ * ++ * See try_to_wake_up() for a longer comment. ++ */ ++ smp_rmb(); ++ if (p->on_rq) ++ return true; ++ ++#ifdef CONFIG_SMP ++ /* ++ * Ensure the task has finished __schedule() and will not be referenced ++ * anymore. Again, see try_to_wake_up() for a longer comment. ++ */ ++ smp_rmb(); ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++#endif ++ ++ return false; ++} ++ +/** + * task_call_func - Invoke a function on task in fixed state + * @p: Process for which the function is to be invoked, can be @current. @@ -3482,28 +3520,12 @@ index 000000000000..d0ab41c4d9ad +int task_call_func(struct task_struct *p, task_call_f func, void *arg) +{ + struct rq *rq = NULL; -+ unsigned int state; + struct rq_flags rf; + int ret; + + raw_spin_lock_irqsave(&p->pi_lock, rf.flags); + -+ state = READ_ONCE(p->__state); -+ -+ /* -+ * Ensure we load p->on_rq after p->__state, otherwise it would be -+ * possible to, falsely, observe p->on_rq == 0. -+ * -+ * See try_to_wake_up() for a longer comment. -+ */ -+ smp_rmb(); -+ -+ /* -+ * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when -+ * the task is blocked. Make sure to check @state since ttwu() can drop -+ * locks at the end, see ttwu_queue_wakelist(). -+ */ -+ if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq) ++ if (__task_needs_rq_lock(p)) + rq = __task_rq_lock(p, &rf); + + /* @@ -3526,6 +3548,38 @@ index 000000000000..d0ab41c4d9ad +} + +/** ++ * cpu_curr_snapshot - Return a snapshot of the currently running task ++ * @cpu: The CPU on which to snapshot the task. ++ * ++ * Returns the task_struct pointer of the task "currently" running on ++ * the specified CPU. If the same task is running on that CPU throughout, ++ * the return value will be a pointer to that task's task_struct structure. ++ * If the CPU did any context switches even vaguely concurrently with the ++ * execution of this function, the return value will be a pointer to the ++ * task_struct structure of a randomly chosen task that was running on ++ * that CPU somewhere around the time that this function was executing. ++ * ++ * If the specified CPU was offline, the return value is whatever it ++ * is, perhaps a pointer to the task_struct structure of that CPU's idle ++ * task, but there is no guarantee. Callers wishing a useful return ++ * value must take some action to ensure that the specified CPU remains ++ * online throughout. ++ * ++ * This function executes full memory barriers before and after fetching ++ * the pointer, which permits the caller to confine this function's fetch ++ * with respect to the caller's accesses to other shared variables. 
++ */ ++struct task_struct *cpu_curr_snapshot(int cpu) ++{ ++ struct task_struct *t; ++ ++ smp_mb(); /* Pairing determined by caller's synchronization design. */ ++ t = rcu_dereference(cpu_curr(cpu)); ++ smp_mb(); /* Pairing determined by caller's synchronization design. */ ++ return t; ++} ++ ++/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * @@ -3884,7 +3938,8 @@ index 000000000000..d0ab41c4d9ad + * Claim the task as running, we do this before switching to it + * such that any running task will have this set. + * -+ * See the ttwu() WF_ON_CPU case and its ordering comment. ++ * See the smp_load_acquire(&p->on_cpu) case in ttwu() and ++ * its ordering comment. + */ + WRITE_ONCE(next->on_cpu, 1); +} @@ -3911,10 +3966,10 @@ index 000000000000..d0ab41c4d9ad + +#ifdef CONFIG_SMP + -+static void do_balance_callbacks(struct rq *rq, struct callback_head *head) ++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) +{ + void (*func)(struct rq *rq); -+ struct callback_head *next; ++ struct balance_callback *next; + + lockdep_assert_held(&rq->lock); + @@ -3941,15 +3996,15 @@ index 000000000000..d0ab41c4d9ad + * This abuse is tolerated because it places all the unlikely/odd cases behind + * a single test, namely: rq->balance_callback == NULL. + */ -+struct callback_head balance_push_callback = { ++struct balance_callback balance_push_callback = { + .next = NULL, -+ .func = (void (*)(struct callback_head *))balance_push, ++ .func = balance_push, +}; + -+static inline struct callback_head * ++static inline struct balance_callback * +__splice_balance_callbacks(struct rq *rq, bool split) +{ -+ struct callback_head *head = rq->balance_callback; ++ struct balance_callback *head = rq->balance_callback; + + if (likely(!head)) + return NULL; @@ -3971,7 +4026,7 @@ index 000000000000..d0ab41c4d9ad + return head; +} + -+static inline struct callback_head *splice_balance_callbacks(struct rq *rq) ++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) +{ + return __splice_balance_callbacks(rq, true); +} @@ -3981,7 +4036,7 @@ index 000000000000..d0ab41c4d9ad + do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); +} + -+static inline void balance_callbacks(struct rq *rq, struct callback_head *head) ++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) +{ + unsigned long flags; + @@ -3998,12 +4053,12 @@ index 000000000000..d0ab41c4d9ad +{ +} + -+static inline struct callback_head *splice_balance_callbacks(struct rq *rq) ++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) +{ + return NULL; +} + -+static inline void balance_callbacks(struct rq *rq, struct callback_head *head) ++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) +{ +} + @@ -4258,6 +4313,7 @@ index 000000000000..d0ab41c4d9ad + * finish_task_switch()'s mmdrop(). + */ + switch_mm_irqs_off(prev->active_mm, next->mm, next); ++ lru_gen_use_mm(next->mm); + + if (!prev->mm) { // from kernel + /* will mmdrop() in finish_task_switch(). 
*/ @@ -4941,18 +4997,23 @@ index 000000000000..d0ab41c4d9ad + +#ifdef CONFIG_SMP + -+#define SCHED_RQ_NR_MIGRATION (32U) ++#ifdef CONFIG_PREEMPT_RT ++#define SCHED_NR_MIGRATE_BREAK 8 ++#else ++#define SCHED_NR_MIGRATE_BREAK 32 ++#endif ++ ++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK; ++ +/* + * Migrate pending tasks in @rq to @dest_cpu -+ * Will try to migrate mininal of half of @rq nr_running tasks and -+ * SCHED_RQ_NR_MIGRATION to @dest_cpu + */ +static inline int +migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu) +{ + struct task_struct *p, *skip = rq->curr; + int nr_migrated = 0; -+ int nr_tries = min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION); ++ int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate); + + while (skip != rq->idle && nr_tries && + (p = sched_rq_next_task(skip, rq)) != rq->idle) { @@ -5193,7 +5254,7 @@ index 000000000000..d0ab41c4d9ad + prev->sched_contributes_to_load = + (prev_state & TASK_UNINTERRUPTIBLE) && + !(prev_state & TASK_NOLOAD) && -+ !(prev->flags & PF_FROZEN); ++ !(prev->flags & TASK_FROZEN); + + if (prev->sched_contributes_to_load) + rq->nr_uninterruptible++; @@ -5309,8 +5370,12 @@ index 000000000000..d0ab41c4d9ad + io_wq_worker_sleeping(tsk); + } + -+ if (tsk_is_pi_blocked(tsk)) -+ return; ++ /* ++ * spinlock and rwlock must not flush block requests. This will ++ * deadlock if the callback attempts to acquire a lock which is ++ * already acquired. ++ */ ++ SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); + + /* + * If we are going to sleep and we have plugged IO queued, @@ -5368,7 +5433,7 @@ index 000000000000..d0ab41c4d9ad + } while (need_resched()); +} + -+#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) +asmlinkage __visible void __sched schedule_user(void) +{ + /* @@ -5741,17 +5806,29 @@ index 000000000000..d0ab41c4d9ad +EXPORT_SYMBOL(set_user_nice); + +/* ++ * is_nice_reduction - check if nice value is an actual reduction ++ * ++ * Similar to can_nice() but does not perform a capability check. ++ * ++ * @p: task ++ * @nice: nice value ++ */ ++static bool is_nice_reduction(const struct task_struct *p, const int nice) ++{ ++ /* Convert nice value [19,-20] to rlimit style value [1,40]: */ ++ int nice_rlim = nice_to_rlimit(nice); ++ ++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); ++} ++ ++/* + * can_nice - check if a task can reduce its nice value + * @p: task + * @nice: nice value + */ +int can_nice(const struct task_struct *p, const int nice) +{ -+ /* Convert nice value [19,-20] to rlimit style value [1,40] */ -+ int nice_rlim = nice_to_rlimit(nice); -+ -+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || -+ capable(CAP_SYS_NICE)); ++ return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); +} + +#ifdef __ARCH_WANT_SYS_NICE @@ -5902,6 +5979,45 @@ index 000000000000..d0ab41c4d9ad + return match; +} + ++/* ++ * Allow unprivileged RT tasks to decrease priority. 
++ * Only issue a capable test if needed and only once to avoid an audit ++ * event on permitted non-privileged operations: ++ */ ++static int user_check_sched_setscheduler(struct task_struct *p, ++ const struct sched_attr *attr, ++ int policy, int reset_on_fork) ++{ ++ if (rt_policy(policy)) { ++ unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); ++ ++ /* Can't set/change the rt policy: */ ++ if (policy != p->policy && !rlim_rtprio) ++ goto req_priv; ++ ++ /* Can't increase priority: */ ++ if (attr->sched_priority > p->rt_priority && ++ attr->sched_priority > rlim_rtprio) ++ goto req_priv; ++ } ++ ++ /* Can't change other user's priorities: */ ++ if (!check_same_owner(p)) ++ goto req_priv; ++ ++ /* Normal users shall not reset the sched_reset_on_fork flag: */ ++ if (p->sched_reset_on_fork && !reset_on_fork) ++ goto req_priv; ++ ++ return 0; ++ ++req_priv: ++ if (!capable(CAP_SYS_NICE)) ++ return -EPERM; ++ ++ return 0; ++} ++ +static int __sched_setscheduler(struct task_struct *p, + const struct sched_attr *attr, + bool user, bool pi) @@ -5914,7 +6030,7 @@ index 000000000000..d0ab41c4d9ad + }; + int oldpolicy = -1, policy = attr->sched_policy; + int retval, newprio; -+ struct callback_head *head; ++ struct balance_callback *head; + unsigned long flags; + struct rq *rq; + int reset_on_fork; @@ -5958,34 +6074,11 @@ index 000000000000..d0ab41c4d9ad + (attr->sched_priority != 0)) + return -EINVAL; + -+ /* -+ * Allow unprivileged RT tasks to decrease priority: -+ */ -+ if (user && !capable(CAP_SYS_NICE)) { -+ if (SCHED_FIFO == policy || SCHED_RR == policy) { -+ unsigned long rlim_rtprio = -+ task_rlimit(p, RLIMIT_RTPRIO); -+ -+ /* Can't set/change the rt policy */ -+ if (policy != p->policy && !rlim_rtprio) -+ return -EPERM; -+ -+ /* Can't increase priority */ -+ if (attr->sched_priority > p->rt_priority && -+ attr->sched_priority > rlim_rtprio) -+ return -EPERM; -+ } -+ -+ /* Can't change other user's priorities */ -+ if (!check_same_owner(p)) -+ return -EPERM; -+ -+ /* Normal users shall not reset the sched_reset_on_fork flag */ -+ if (p->sched_reset_on_fork && !reset_on_fork) -+ return -EPERM; -+ } -+ + if (user) { ++ retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); ++ if (retval) ++ return retval; ++ + retval = security_task_setscheduler(p); + if (retval) + return retval; @@ -7234,7 +7327,7 @@ index 000000000000..d0ab41c4d9ad + if (pid_alive(p)) + ppid = task_pid_nr(rcu_dereference(p->real_parent)); + rcu_read_unlock(); -+ pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n", ++ pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n", + free, task_pid_nr(p), ppid, + read_task_thread_flags(p)); + @@ -7262,7 +7355,7 @@ index 000000000000..d0ab41c4d9ad + * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows + * TASK_KILLABLE). 
+ */ -+ if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE) ++ if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD)) + return false; + + return true; @@ -7304,6 +7397,19 @@ index 000000000000..d0ab41c4d9ad + +void dump_cpu_task(int cpu) +{ ++ if (cpu == smp_processor_id() && in_hardirq()) { ++ struct pt_regs *regs; ++ ++ regs = get_irq_regs(); ++ if (regs) { ++ show_regs(regs); ++ return; ++ } ++ } ++ ++ if (trigger_single_cpu_backtrace(cpu)) ++ return; ++ + pr_info("Task dump for CPU %d:\n", cpu); + sched_show_task(cpu_curr(cpu)); +} @@ -7379,7 +7485,7 @@ index 000000000000..d0ab41c4d9ad +} + +int task_can_attach(struct task_struct *p, -+ const struct cpumask *cs_cpus_allowed) ++ const struct cpumask *cs_effective_cpus) +{ + int ret = 0; + @@ -8350,14 +8456,12 @@ index 000000000000..d0ab41c4d9ad + sched_unregister_group(tg); +} + -+static void cpu_cgroup_fork(struct task_struct *task) -+{ -+} -+ ++#ifdef CONFIG_RT_GROUP_SCHED +static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) +{ + return 0; +} ++#endif + +static void cpu_cgroup_attach(struct cgroup_taskset *tset) +{ @@ -8431,8 +8535,9 @@ index 000000000000..d0ab41c4d9ad + .css_released = cpu_cgroup_css_released, + .css_free = cpu_cgroup_css_free, + .css_extra_stat_show = cpu_extra_stat_show, -+ .fork = cpu_cgroup_fork, ++#ifdef CONFIG_RT_GROUP_SCHED + .can_attach = cpu_cgroup_can_attach, ++#endif + .attach = cpu_cgroup_attach, + .legacy_cftypes = cpu_files, + .legacy_cftypes = cpu_legacy_files, @@ -8482,13 +8587,14 @@ index 000000000000..1212a031700e +{} diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h new file mode 100644 -index 000000000000..a181bf9ce57d +index 000000000000..7c1cc0cbca0d --- /dev/null +++ b/kernel/sched/alt_sched.h -@@ -0,0 +1,645 @@ +@@ -0,0 +1,660 @@ +#ifndef ALT_SCHED_H +#define ALT_SCHED_H + ++#include +#include +#include +#include @@ -8567,6 +8673,15 @@ index 000000000000..a181bf9ce57d +#define MAX_SHARES (1UL << 18) +#endif + ++/* ++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off: ++ */ ++#ifdef CONFIG_SCHED_DEBUG ++# define const_debug __read_mostly ++#else ++# define const_debug const ++#endif ++ +/* task_struct::on_rq states: */ +#define TASK_ON_RQ_QUEUED 1 +#define TASK_ON_RQ_MIGRATING 2 @@ -8587,7 +8702,6 @@ index 000000000000..a181bf9ce57d +#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ +#define WF_FORK 0x02 /* child wakeup after fork */ +#define WF_MIGRATED 0x04 /* internal use, task got migrated */ -+#define WF_ON_CPU 0x08 /* Wakee is on_rq */ + +#define SCHED_QUEUE_BITS (SCHED_BITS - 1) + @@ -8596,6 +8710,12 @@ index 000000000000..a181bf9ce57d + struct list_head heads[SCHED_BITS]; +}; + ++struct rq; ++struct balance_callback { ++ struct balance_callback *next; ++ void (*func)(struct rq *rq); ++}; ++ +/* + * This is the main, per-CPU runqueue data structure. + * This data should only be modified by the local cpu. 
@@ -8644,7 +8764,7 @@ index 000000000000..a181bf9ce57d + int active_balance; + struct cpu_stop_work active_balance_work; +#endif -+ struct callback_head *balance_callback; ++ struct balance_callback *balance_callback; +#ifdef CONFIG_HOTPLUG_CPU + struct rcuwait hotplug_wait; +#endif @@ -8943,7 +9063,7 @@ index 000000000000..a181bf9ce57d + return rq->curr == p; +} + -+static inline bool task_running(struct task_struct *p) ++static inline bool task_on_cpu(struct task_struct *p) +{ + return p->on_cpu; +} @@ -9287,25 +9407,25 @@ index 99bdd96f454f..23f80a86d2d7 100644 #endif diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c -index 3dbf351d12d5..b2590f961139 100644 +index 1207c78f85c1..68812e0756cb 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c -@@ -160,9 +160,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) - unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); +@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) + struct rq *rq = cpu_rq(sg_cpu->cpu); - sg_cpu->max = max; + sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu); +#ifndef CONFIG_SCHED_ALT sg_cpu->bw_dl = cpu_bw_dl(rq); - sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), max, + sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), FREQUENCY_UTIL, NULL); +#else + sg_cpu->bw_dl = 0; -+ sg_cpu->util = rq_load_util(rq, max); ++ sg_cpu->util = rq_load_util(rq, sg_cpu->max); +#endif /* CONFIG_SCHED_ALT */ } /** -@@ -306,8 +311,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } +@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } */ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) { @@ -9316,7 +9436,7 @@ index 3dbf351d12d5..b2590f961139 100644 } static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, -@@ -607,6 +614,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) +@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) } ret = sched_setattr_nocheck(thread, &attr); @@ -9324,7 +9444,7 @@ index 3dbf351d12d5..b2590f961139 100644 if (ret) { kthread_stop(thread); pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); -@@ -839,7 +847,9 @@ cpufreq_governor_init(schedutil_gov); +@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov); #ifdef CONFIG_ENERGY_MODEL static void rebuild_sd_workfn(struct work_struct *work) { @@ -9335,7 +9455,7 @@ index 3dbf351d12d5..b2590f961139 100644 static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c -index 78a233d43757..b3bbc87d4352 100644 +index 95fc77853743..b48b3f9ed47f 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime) @@ -9356,7 +9476,7 @@ index 78a233d43757..b3bbc87d4352 100644 task_group_account_field(p, CPUTIME_NICE, cputime); cpustat[CPUTIME_GUEST_NICE] += cputime; } else { -@@ -269,7 +269,7 @@ static inline u64 account_other_time(u64 max) +@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max) #ifdef CONFIG_64BIT static inline u64 read_sum_exec_runtime(struct task_struct *t) { @@ -9365,7 +9485,7 @@ index 78a233d43757..b3bbc87d4352 100644 } #else static u64 read_sum_exec_runtime(struct task_struct *t) -@@ -279,7 +279,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t) +@@ -294,7 +294,7 @@ static u64 
read_sum_exec_runtime(struct task_struct *t) struct rq *rq; rq = task_rq_lock(t, &rf); @@ -9374,7 +9494,7 @@ index 78a233d43757..b3bbc87d4352 100644 task_rq_unlock(rq, t, &rf); return ns; -@@ -611,7 +611,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, +@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) { struct task_cputime cputime = { @@ -9384,7 +9504,7 @@ index 78a233d43757..b3bbc87d4352 100644 if (task_cputime(p, &cputime.utime, &cputime.stime)) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index bb3d63bdf4ae..4e1680785704 100644 +index 1637b65ba07a..033c6deeb515 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -7,6 +7,7 @@ @@ -9435,7 +9555,7 @@ index bb3d63bdf4ae..4e1680785704 100644 debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency); debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity); debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity); -@@ -336,11 +343,13 @@ static __init int sched_init_debug(void) +@@ -337,11 +344,13 @@ static __init int sched_init_debug(void) #endif debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops); @@ -9449,7 +9569,7 @@ index bb3d63bdf4ae..4e1680785704 100644 #ifdef CONFIG_SMP static cpumask_var_t sd_sysctl_cpus; -@@ -1067,6 +1076,7 @@ void proc_sched_set_task(struct task_struct *p) +@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p) memset(&p->stats, 0, sizeof(p->stats)); #endif } @@ -9458,7 +9578,7 @@ index bb3d63bdf4ae..4e1680785704 100644 void resched_latency_warn(int cpu, u64 latency) { diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c -index 328cccbee444..aef991facc79 100644 +index f26ab2675f7d..480d4ad16d45 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state) @@ -9631,7 +9751,7 @@ index 0f310768260c..bd38bf738fe9 100644 * thermal: * diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h -index 4ff2ed4f8fa1..226eeed61318 100644 +index 3a0e0dc28721..e8a7d84aa5a5 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -1,13 +1,15 @@ @@ -9659,7 +9779,7 @@ index 4ff2ed4f8fa1..226eeed61318 100644 static inline void cfs_se_util_change(struct sched_avg *avg) { unsigned int enqueued; -@@ -155,9 +158,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) +@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) return rq_clock_pelt(rq_of(cfs_rq)); } #endif @@ -9671,7 +9791,7 @@ index 4ff2ed4f8fa1..226eeed61318 100644 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { -@@ -175,6 +180,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) +@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) { return 0; } @@ -9680,7 +9800,7 @@ index 4ff2ed4f8fa1..226eeed61318 100644 static inline int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 47b89a0fc6e5..de2641a32c22 100644 +index a4a20046e586..c363693cd869 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -5,6 +5,10 @@ @@ -9694,9 +9814,9 @@ index 47b89a0fc6e5..de2641a32c22 100644 #include #include #include -@@ -3116,4 +3120,9 @@ extern int sched_dynamic_mode(const char *str); - extern void sched_dynamic_update(int mode); - #endif +@@ 
-3183,4 +3187,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr, + cgroup_account_cputime(curr, delta_exec); + } +static inline int task_running_nice(struct task_struct *p) +{ @@ -9736,7 +9856,7 @@ index 857f837f52cb..5486c63e4790 100644 } return 0; diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h -index baa839c1ba96..15238be0581b 100644 +index 84a188913cc9..53934e7ef5db 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt @@ -9754,9 +9874,9 @@ index baa839c1ba96..15238be0581b 100644 +#endif /* CONFIG_SCHED_ALT */ #ifdef CONFIG_PSI - /* + void psi_task_change(struct task_struct *task, int clear, int set); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c -index 05b6c2ad90b9..480ef393b3c9 100644 +index 8739c2a5a54e..d8dd6c15eb47 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -3,6 +3,7 @@ @@ -9794,7 +9914,7 @@ index 05b6c2ad90b9..480ef393b3c9 100644 #ifdef CONFIG_NUMA static const struct cpumask *sd_numa_mask(int cpu) -@@ -2638,3 +2643,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], +@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); mutex_unlock(&sched_domains_mutex); } @@ -9811,12 +9931,12 @@ index 05b6c2ad90b9..480ef393b3c9 100644 +#endif /* CONFIG_NUMA */ +#endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c -index 35d034219513..23719c728677 100644 +index c6d9dec11b74..2bc42ce8b48e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c -@@ -86,6 +86,10 @@ +@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); - /* Constants used for minimum and maximum */ + /* Constants used for minimum and maximum */ +#ifdef CONFIG_SCHED_ALT +extern int sched_yield_type; @@ -9825,7 +9945,7 @@ index 35d034219513..23719c728677 100644 #ifdef CONFIG_PERF_EVENTS static const int six_hundred_forty_kb = 640 * 1024; #endif -@@ -1590,6 +1594,7 @@ int proc_do_static_key(struct ctl_table *table, int write, +@@ -1633,6 +1637,7 @@ int proc_do_static_key(struct ctl_table *table, int write, } static struct ctl_table kern_table[] = { @@ -9833,15 +9953,15 @@ index 35d034219513..23719c728677 100644 #ifdef CONFIG_NUMA_BALANCING { .procname = "numa_balancing", -@@ -1601,6 +1606,7 @@ static struct ctl_table kern_table[] = { - .extra2 = SYSCTL_FOUR, +@@ -1652,6 +1657,7 @@ static struct ctl_table kern_table[] = { + .extra1 = SYSCTL_ZERO, }, #endif /* CONFIG_NUMA_BALANCING */ +#endif /* !CONFIG_SCHED_ALT */ { .procname = "panic", .data = &panic_timeout, -@@ -1902,6 +1908,17 @@ static struct ctl_table kern_table[] = { +@@ -1953,6 +1959,17 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif @@ -9860,7 +9980,7 @@ index 35d034219513..23719c728677 100644 { .procname = "spin_retry", diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c -index 0ea8702eb516..a27a0f3a654d 100644 +index 3ae661ab6260..35f0176dcdb0 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, diff --git a/sys-kernel/pinephone-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch b/sys-kernel/pinephone-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch deleted file mode 100644 index 6b2049d..0000000 --- a/sys-kernel/pinephone-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch +++ /dev/null @@ -1,13 +0,0 @@ ---- 
a/init/Kconfig 2022-07-07 13:22:00.698439887 -0400 -+++ b/init/Kconfig 2022-07-07 13:23:45.152333576 -0400 -@@ -874,8 +874,9 @@ config UCLAMP_BUCKETS_COUNT - If in doubt, use the default value. - - menuconfig SCHED_ALT -+ depends on X86_64 - bool "Alternative CPU Schedulers" -- default y -+ default n - help - This feature enable alternative CPU scheduler" - diff --git a/sys-kernel/pinephone-sources/files/5021_sched-alt-missing-rq-lock-irq-function.patch b/sys-kernel/pinephone-sources/files/5021_sched-alt-missing-rq-lock-irq-function.patch new file mode 100644 index 0000000..04cca61 --- /dev/null +++ b/sys-kernel/pinephone-sources/files/5021_sched-alt-missing-rq-lock-irq-function.patch @@ -0,0 +1,30 @@ +From 4157360d2e1cbdfb8065f151dbe057b17188a23f Mon Sep 17 00:00:00 2001 +From: Tor Vic +Date: Mon, 7 Nov 2022 15:11:54 +0100 +Subject: [PATCH] sched/alt: Add missing rq_lock_irq() function to header file + +--- + kernel/sched/alt_sched.h | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h +index 93ff3bddd36f..a00bc84b93b2 100644 +--- a/kernel/sched/alt_sched.h ++++ b/kernel/sched/alt_sched.h +@@ -387,6 +387,13 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) + raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); + } + ++static inline void ++rq_lock_irq(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irq(&rq->lock); ++} ++ + static inline void + rq_lock(struct rq *rq, struct rq_flags *rf) + __acquires(rq->lock) +-- +GitLab + diff --git a/sys-kernel/pinephone-sources/files/config-ppp b/sys-kernel/pinephone-sources/files/config-ppp index 7e7a718..d6b69e5 100755 --- a/sys-kernel/pinephone-sources/files/config-ppp +++ b/sys-kernel/pinephone-sources/files/config-ppp @@ -1,10 +1,10 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 5.19.12 Kernel Configuration +# Linux/arm64 6.1.2-1 Kernel Configuration # -CONFIG_CC_VERSION_TEXT="gcc (Gentoo 12.2.1_p20221008 p1) 12.2.1 20221008" +CONFIG_CC_VERSION_TEXT="gcc (GCC) 12.1.0" CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=120201 +CONFIG_GCC_VERSION=120100 CONFIG_CLANG_VERSION=0 CONFIG_AS_IS_GNU=y CONFIG_AS_VERSION=23800 @@ -13,7 +13,6 @@ CONFIG_LD_VERSION=23800 CONFIG_LLD_VERSION=0 CONFIG_CC_CAN_LINK=y CONFIG_CC_CAN_LINK_STATIC=y -CONFIG_CC_HAS_ASM_GOTO=y CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y CONFIG_CC_HAS_ASM_INLINE=y @@ -72,6 +71,8 @@ CONFIG_ARCH_HAS_TICK_BROADCAST=y CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y # # Timers subsystem @@ -94,9 +95,10 @@ CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y # CONFIG_BPF_SYSCALL=y CONFIG_BPF_JIT=y -# CONFIG_BPF_JIT_ALWAYS_ON is not set +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_BPF_JIT_DEFAULT_ON=y -CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y # CONFIG_BPF_PRELOAD is not set # end of BPF subsystem @@ -168,8 +170,8 @@ CONFIG_NUMA_BALANCING=y CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y CONFIG_CGROUPS=y CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y CONFIG_CGROUP_WRITEBACK=y @@ -292,28 +294,24 @@ CONFIG_ARCH_PROC_KCORE_TEXT=y # CONFIG_ARCH_SUNXI is not set # CONFIG_ARCH_ALPINE is not set # CONFIG_ARCH_APPLE is not set -CONFIG_ARCH_BCM2835=y -# CONFIG_ARCH_BCM4908 is not set -# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BCM is not set # CONFIG_ARCH_BERLIN is not set # CONFIG_ARCH_BITMAIN is not set -# CONFIG_ARCH_BRCMSTB is not set # CONFIG_ARCH_EXYNOS is not set # CONFIG_ARCH_SPARX5 is not set # CONFIG_ARCH_K3 is not set -# CONFIG_ARCH_LAYERSCAPE is not set # CONFIG_ARCH_LG1K is not set # CONFIG_ARCH_HISI is not set # CONFIG_ARCH_KEEMBAY is not set # CONFIG_ARCH_MEDIATEK is not set # CONFIG_ARCH_MESON is not set # CONFIG_ARCH_MVEBU is not set -# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_NPCM is not set # CONFIG_ARCH_QCOM is not set # CONFIG_ARCH_REALTEK is not set # CONFIG_ARCH_RENESAS is not set CONFIG_ARCH_ROCKCHIP=y -# CONFIG_ARCH_S32 is not set # CONFIG_ARCH_SEATTLE is not set # CONFIG_ARCH_INTEL_SOCFPGA is not set # CONFIG_ARCH_SYNQUACER is not set @@ -353,12 +351,14 @@ CONFIG_ARM64_ERRATUM_1165522=y CONFIG_ARM64_ERRATUM_1319367=y CONFIG_ARM64_ERRATUM_1530923=y CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y CONFIG_ARM64_ERRATUM_1286807=y CONFIG_ARM64_ERRATUM_1463225=y CONFIG_ARM64_ERRATUM_1542419=y CONFIG_ARM64_ERRATUM_1508412=y CONFIG_ARM64_ERRATUM_2051678=y -CONFIG_ARM64_ERRATUM_2077057=y +# CONFIG_ARM64_ERRATUM_2077057 is not set +CONFIG_ARM64_ERRATUM_2658417=y CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y CONFIG_ARM64_ERRATUM_2054223=y CONFIG_ARM64_ERRATUM_2067961=y @@ -415,7 +415,7 @@ CONFIG_CRASH_DUMP=y CONFIG_TRANS_TABLE=y CONFIG_XEN_DOM0=y CONFIG_XEN=y -CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ARCH_FORCE_MAX_ORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y CONFIG_RODATA_FULL_DEFAULT_ENABLED=y @@ -423,6 +423,7 @@ CONFIG_RODATA_FULL_DEFAULT_ENABLED=y CONFIG_ARM64_TAGGED_ADDR_ABI=y CONFIG_COMPAT=y CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set # CONFIG_ARMV8_DEPRECATED is not set # @@ -472,7 +473,6 @@ CONFIG_AS_HAS_ARMV8_5=y 
CONFIG_ARM64_BTI=y CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y CONFIG_ARM64_E0PD=y -CONFIG_ARCH_RANDOM=y CONFIG_ARM64_AS_HAS_MTE=y CONFIG_ARM64_MTE=y # end of ARMv8.5 architectural features @@ -515,6 +515,7 @@ CONFIG_PM_STD_PARTITION="" CONFIG_PM_SLEEP=y CONFIG_PM_SLEEP_SMP=y CONFIG_PM_AUTOSLEEP=y +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=100 CONFIG_PM_WAKELOCKS_GC=y @@ -550,7 +551,6 @@ CONFIG_DT_IDLE_GENPD=y # # ARM CPU Idle Drivers # -CONFIG_ARM_CPUIDLE=y CONFIG_ARM_PSCI_CPUIDLE=y CONFIG_ARM_PSCI_CPUIDLE_DOMAIN=y # end of ARM CPU Idle Drivers @@ -603,28 +603,6 @@ CONFIG_KVM_XFER_TO_GUEST_WORK=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=y # CONFIG_NVHE_EL2_DEBUG is not set -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA256_ARM64=y -CONFIG_CRYPTO_SHA512_ARM64=m -CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_SHA512_ARM64_CE=m -CONFIG_CRYPTO_SHA3_ARM64=m -CONFIG_CRYPTO_SM3_ARM64_CE=m -# CONFIG_CRYPTO_SM4_ARM64_CE is not set -# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set -# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m -CONFIG_CRYPTO_AES_ARM64=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m -CONFIG_CRYPTO_CHACHA20_NEON=m -CONFIG_CRYPTO_POLY1305_NEON=m -# CONFIG_CRYPTO_NHPOLY1305_NEON is not set -CONFIG_CRYPTO_AES_ARM64_BS=m # # General architecture-dependent options @@ -636,6 +614,7 @@ CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y CONFIG_JUMP_LABEL=y # CONFIG_STATIC_KEYS_SELFTEST is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y @@ -684,16 +663,19 @@ CONFIG_ARCH_SUPPORTS_LTO_CLANG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y CONFIG_LTO_NONE=y CONFIG_ARCH_SUPPORTS_CFI_CLANG=y -CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y CONFIG_HAVE_MOVE_PUD=y CONFIG_HAVE_MOVE_PMD=y CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y CONFIG_HAVE_MOD_ARCH_SPECIFIC=y CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y CONFIG_ARCH_HAS_ELF_RANDOMIZE=y CONFIG_HAVE_ARCH_MMAP_RND_BITS=y CONFIG_ARCH_MMAP_RND_BITS=18 @@ -724,6 +706,7 @@ CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y # # GCOV-based kernel profiling @@ -874,6 +857,7 @@ CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 # CONFIG_PAGE_REPORTING is not set CONFIG_MIGRATION=y CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y @@ -885,9 +869,11 @@ CONFIG_KSM=y CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y CONFIG_MEMORY_FAILURE=y +CONFIG_ARCH_WANTS_THP_SWAP=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y # CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y # CONFIG_READ_ONLY_THP_FOR_FS is not set CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y @@ -903,7 +889,6 @@ CONFIG_GENERIC_EARLY_IOREMAP=y # CONFIG_IDLE_PAGE_TRACKING is not set CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y 
CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y -CONFIG_ARCH_HAS_VM_GET_PAGE_PROT=y CONFIG_ARCH_HAS_PTE_DEVMAP=y CONFIG_ZONE_DMA=y CONFIG_ZONE_DMA32=y @@ -915,6 +900,7 @@ CONFIG_ARCH_HAS_PTE_SPECIAL=y CONFIG_SECRETMEM=y # CONFIG_ANON_VMA_NAME is not set # CONFIG_USERFAULTFD is not set +# CONFIG_LRU_GEN is not set # # Data Access Monitoring @@ -977,6 +963,7 @@ CONFIG_INET_ESP=m # CONFIG_INET_ESP_OFFLOAD is not set # CONFIG_INET_ESPINTCP is not set CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 CONFIG_INET_XFRM_TUNNEL=m CONFIG_INET_TUNNEL=m CONFIG_INET_DIAG=y @@ -1028,6 +1015,7 @@ CONFIG_NETFILTER_INGRESS=y CONFIG_NETFILTER_EGRESS=y CONFIG_NETFILTER_NETLINK=m CONFIG_NETFILTER_FAMILY_BRIDGE=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set CONFIG_NETFILTER_NETLINK_ACCT=m CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NETFILTER_NETLINK_LOG=m @@ -1061,7 +1049,31 @@ CONFIG_NF_NAT=y CONFIG_NF_NAT_FTP=m CONFIG_NF_NAT_REDIRECT=y CONFIG_NF_NAT_MASQUERADE=y -# CONFIG_NF_TABLES is not set +CONFIG_NF_TABLES=m +# CONFIG_NF_TABLES_INET is not set +# CONFIG_NF_TABLES_NETDEV is not set +# CONFIG_NFT_NUMGEN is not set +CONFIG_NFT_CT=m +# CONFIG_NFT_CONNLIMIT is not set +# CONFIG_NFT_LOG is not set +# CONFIG_NFT_LIMIT is not set +# CONFIG_NFT_MASQ is not set +# CONFIG_NFT_REDIR is not set +# CONFIG_NFT_NAT is not set +# CONFIG_NFT_TUNNEL is not set +# CONFIG_NFT_OBJREF is not set +# CONFIG_NFT_QUEUE is not set +# CONFIG_NFT_QUOTA is not set +# CONFIG_NFT_REJECT is not set +# CONFIG_NFT_COMPAT is not set +# CONFIG_NFT_HASH is not set +CONFIG_NFT_FIB=m +# CONFIG_NFT_XFRM is not set +# CONFIG_NFT_SOCKET is not set +# CONFIG_NFT_OSF is not set +# CONFIG_NFT_TPROXY is not set +# CONFIG_NFT_SYNPROXY is not set +# CONFIG_NF_FLOW_TABLE is not set CONFIG_NETFILTER_XTABLES=y CONFIG_NETFILTER_XTABLES_COMPAT=y @@ -1158,6 +1170,10 @@ CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NF_DEFRAG_IPV4=y # CONFIG_NF_SOCKET_IPV4 is not set # CONFIG_NF_TPROXY_IPV4 is not set +CONFIG_NF_TABLES_IPV4=y +# CONFIG_NFT_DUP_IPV4 is not set +CONFIG_NFT_FIB_IPV4=m +# CONFIG_NF_TABLES_ARP is not set # CONFIG_NF_DUP_IPV4 is not set # CONFIG_NF_LOG_ARP is not set CONFIG_NF_LOG_IPV4=y @@ -1188,6 +1204,9 @@ CONFIG_IP_NF_RAW=m # # CONFIG_NF_SOCKET_IPV6 is not set # CONFIG_NF_TPROXY_IPV6 is not set +CONFIG_NF_TABLES_IPV6=y +# CONFIG_NFT_DUP_IPV6 is not set +CONFIG_NFT_FIB_IPV6=m # CONFIG_NF_DUP_IPV6 is not set CONFIG_NF_REJECT_IPV6=m CONFIG_NF_LOG_IPV6=m @@ -1215,9 +1234,11 @@ CONFIG_IP6_NF_TARGET_MASQUERADE=m # end of IPv6: Netfilter Configuration CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_NF_TABLES_BRIDGE is not set # CONFIG_NF_CONNTRACK_BRIDGE is not set # CONFIG_BRIDGE_NF_EBTABLES is not set -# CONFIG_BPFILTER is not set +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=y # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set # CONFIG_RDS is not set @@ -1236,7 +1257,6 @@ CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y -# CONFIG_DECNET is not set CONFIG_LLC=m # CONFIG_LLC2 is not set # CONFIG_ATALK is not set @@ -1408,9 +1428,12 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # Firmware loader # CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_PAGED_BUF=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set -# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set CONFIG_FW_CACHE=y # CONFIG_FW_UPLOAD is not set # end of Firmware loader @@ -1457,7 +1480,6 @@ CONFIG_VEXPRESS_CONFIG=y CONFIG_ARM_SCPI_PROTOCOL=y CONFIG_ARM_SCPI_POWER_DOMAIN=y 
-CONFIG_RASPBERRYPI_FIRMWARE=y # CONFIG_ARM_FFA_TRANSPORT is not set # CONFIG_GOOGLE_FIRMWARE is not set CONFIG_ARM_PSCI_FW=y @@ -1607,6 +1629,7 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_XEN_BLKDEV_FRONTEND is not set # CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_UBLK is not set # # NVME Support @@ -1638,6 +1661,8 @@ CONFIG_SRAM=y # CONFIG_XILINX_SDFEC is not set # CONFIG_HISI_HIKEY_USB is not set # CONFIG_OPEN_DICE is not set +# CONFIG_PPKB_POWER_MANAGER is not set +# CONFIG_VCPU_STALL_DETECTOR is not set # CONFIG_MODEM_POWER is not set # CONFIG_C2PORT is not set @@ -1714,7 +1739,6 @@ CONFIG_SCSI_HISI_SAS=m # CONFIG_SCSI_DH is not set # end of SCSI device support -CONFIG_HAVE_PATA_PLATFORM=y CONFIG_ATA=m CONFIG_SATA_HOST=y CONFIG_ATA_VERBOSE_ERROR=y @@ -1725,6 +1749,7 @@ CONFIG_SATA_PMP=y # Controllers with non-SFF native interface # CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set CONFIG_AHCI_CEVA=m CONFIG_AHCI_XGENE=m CONFIG_AHCI_QORIQ=m @@ -1747,7 +1772,7 @@ CONFIG_ATA_BMDMA=y # # PIO-only SFF controllers # -# CONFIG_PATA_PLATFORM is not set +# CONFIG_PATA_OF_PLATFORM is not set # # Generic fallback / legacy drivers @@ -1852,6 +1877,8 @@ CONFIG_HNS_ENET=y CONFIG_NET_VENDOR_HUAWEI=y CONFIG_NET_VENDOR_I825XX=y CONFIG_NET_VENDOR_INTEL=y +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_NET_VENDOR_ADI=y CONFIG_NET_VENDOR_LITEX=y # CONFIG_LITEX_LITEETH is not set CONFIG_NET_VENDOR_MARVELL=y @@ -1968,6 +1995,7 @@ CONFIG_SMSC_PHY=m # CONFIG_VITESSE_PHY is not set # CONFIG_XILINX_GMII2RGMII is not set # CONFIG_MICREL_KS8995MA is not set +# CONFIG_PSE_CONTROLLER is not set CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y CONFIG_FWNODE_MDIO=y @@ -2206,18 +2234,18 @@ CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_DLINK_DIR685 is not set # CONFIG_KEYBOARD_LKKBD is not set CONFIG_KEYBOARD_GPIO=y -CONFIG_KEYBOARD_GPIO_POLLED=y +CONFIG_KEYBOARD_GPIO_POLLED=m # CONFIG_KEYBOARD_TCA6416 is not set # CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set +CONFIG_KEYBOARD_MATRIX=m +CONFIG_KEYBOARD_LM8323=m # CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set +CONFIG_KEYBOARD_MAX7359=m # CONFIG_KEYBOARD_MCS is not set # CONFIG_KEYBOARD_MPR121 is not set # CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_PINEPHONE is not set +CONFIG_KEYBOARD_OPENCORES=m +CONFIG_KEYBOARD_PINEPHONE=m # CONFIG_KEYBOARD_SAMSUNG is not set # CONFIG_KEYBOARD_STOWAWAY is not set # CONFIG_KEYBOARD_SUNKBD is not set @@ -2300,7 +2328,6 @@ CONFIG_TOUCHSCREEN_GOODIX=y # CONFIG_TOUCHSCREEN_MK712 is not set # CONFIG_TOUCHSCREEN_PENMOUNT is not set CONFIG_TOUCHSCREEN_EDT_FT5X06=y -# CONFIG_TOUCHSCREEN_RASPBERRYPI_FW is not set # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set # CONFIG_TOUCHSCREEN_PIXCIR is not set @@ -2322,6 +2349,7 @@ CONFIG_TOUCHSCREEN_EDT_FT5X06=y # CONFIG_TOUCHSCREEN_TPS6507X is not set # CONFIG_TOUCHSCREEN_ZET6223 is not set # CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set # CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set # CONFIG_TOUCHSCREEN_IQS5XX is not set # CONFIG_TOUCHSCREEN_ZINITIX is not set @@ -2350,6 +2378,7 @@ CONFIG_INPUT_RK805_PWRKEY=m # CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set # CONFIG_INPUT_DA7280_HAPTICS is not set # CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IBM_PANEL is not set # CONFIG_INPUT_IMS_PCU is not set # CONFIG_INPUT_IQS269A is not set # CONFIG_INPUT_IQS626A is not set @@ -2408,12 
+2437,10 @@ CONFIG_SERIAL_8250_NR_UARTS=4 CONFIG_SERIAL_8250_RUNTIME_UARTS=4 CONFIG_SERIAL_8250_EXTENDED=y # CONFIG_SERIAL_8250_MANY_PORTS is not set -# CONFIG_SERIAL_8250_ASPEED_VUART is not set CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_SERIAL_8250_DETECT_IRQ is not set # CONFIG_SERIAL_8250_RSA is not set CONFIG_SERIAL_8250_DWLIB=y -# CONFIG_SERIAL_8250_BCM2835AUX is not set CONFIG_SERIAL_8250_FSL=y CONFIG_SERIAL_8250_DW=y # CONFIG_SERIAL_8250_RT288X is not set @@ -2460,8 +2487,6 @@ CONFIG_SERIAL_DEV_CTRL_TTYPORT=y CONFIG_HW_RANDOM=m # CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_HW_RANDOM_BA431 is not set -CONFIG_HW_RANDOM_BCM2835=m -CONFIG_HW_RANDOM_IPROC_RNG200=m CONFIG_HW_RANDOM_OPTEE=m # CONFIG_HW_RANDOM_CCTRNG is not set # CONFIG_HW_RANDOM_XIPHERA is not set @@ -2470,6 +2495,7 @@ CONFIG_DEVMEM=y CONFIG_TCG_TPM=y # CONFIG_TCG_TIS is not set # CONFIG_TCG_TIS_SPI is not set +# CONFIG_TCG_TIS_I2C is not set # CONFIG_TCG_TIS_I2C_CR50 is not set # CONFIG_TCG_TIS_I2C_ATMEL is not set CONFIG_TCG_TIS_I2C_INFINEON=y @@ -2519,8 +2545,6 @@ CONFIG_I2C_ALGOBIT=y # # I2C system bus drivers (mostly embedded / system-on-chip) # -# CONFIG_I2C_BCM2835 is not set -CONFIG_I2C_BRCMSTB=y # CONFIG_I2C_CADENCE is not set # CONFIG_I2C_CBUS_GPIO is not set CONFIG_I2C_DESIGNWARE_CORE=y @@ -2571,8 +2595,6 @@ CONFIG_SPI_MEM=y # # CONFIG_SPI_ALTERA is not set # CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BCM2835 is not set -# CONFIG_SPI_BCM2835AUX is not set # CONFIG_SPI_BITBANG is not set # CONFIG_SPI_CADENCE is not set # CONFIG_SPI_CADENCE_QUADSPI is not set @@ -2581,6 +2603,8 @@ CONFIG_SPI_MEM=y # CONFIG_SPI_NXP_FLEXSPI is not set # CONFIG_SPI_GPIO is not set # CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set # CONFIG_SPI_OC_TINY is not set CONFIG_SPI_PL022=y CONFIG_SPI_ROCKCHIP=y @@ -2644,6 +2668,7 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y CONFIG_PINCONF=y CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_AXP209 is not set +# CONFIG_PINCTRL_CY8C95X0 is not set CONFIG_PINCTRL_MAX77620=y # CONFIG_PINCTRL_MCP23S08 is not set # CONFIG_PINCTRL_MICROCHIP_SGPIO is not set @@ -2653,7 +2678,6 @@ CONFIG_PINCTRL_ROCKCHIP=y CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_STMFX is not set # CONFIG_PINCTRL_SX150X is not set -CONFIG_PINCTRL_BCM2835=y # # Renesas pinctrl drivers @@ -2673,7 +2697,6 @@ CONFIG_GPIO_GENERIC=y # # CONFIG_GPIO_74XX_MMIO is not set # CONFIG_GPIO_ALTERA is not set -CONFIG_GPIO_RASPBERRYPI_EXP=y # CONFIG_GPIO_CADENCE is not set CONFIG_GPIO_DWAPB=y # CONFIG_GPIO_FTGPIO010 is not set @@ -2684,7 +2707,6 @@ CONFIG_GPIO_GENERIC_PLATFORM=y CONFIG_GPIO_MB86S7X=y CONFIG_GPIO_PL061=y CONFIG_GPIO_ROCKCHIP=y -# CONFIG_GPIO_SAMA5D2_PIOBU is not set # CONFIG_GPIO_SIFIVE is not set # CONFIG_GPIO_SYSCON is not set CONFIG_GPIO_XGENE=y @@ -2695,7 +2717,6 @@ CONFIG_GPIO_XGENE=y # # I2C GPIO expanders # -# CONFIG_GPIO_ADP5588 is not set # CONFIG_GPIO_ADNP is not set # CONFIG_GPIO_GW_PLD is not set # CONFIG_GPIO_MAX7300 is not set @@ -2749,19 +2770,19 @@ CONFIG_POWER_RESET_GPIO_RESTART=y CONFIG_POWER_RESET_VEXPRESS=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -CONFIG_REBOOT_MODE=y -CONFIG_SYSCON_REBOOT_MODE=y -# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_REBOOT_MODE=m +CONFIG_SYSCON_REBOOT_MODE=m +CONFIG_NVMEM_REBOOT_MODE=m CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_POWER_SUPPLY_HWMON=y # CONFIG_PDA_POWER is not set # CONFIG_GENERIC_ADC_BATTERY 
is not set -# CONFIG_IP5XXX_POWER is not set +CONFIG_IP5XXX_POWER=y # CONFIG_TEST_POWER is not set # CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_CW2015 is not set +CONFIG_BATTERY_CW2015=m # CONFIG_BATTERY_DS2780 is not set # CONFIG_BATTERY_DS2781 is not set # CONFIG_BATTERY_DS2782 is not set @@ -2769,8 +2790,8 @@ CONFIG_POWER_SUPPLY_HWMON=y CONFIG_BATTERY_SBS=m # CONFIG_CHARGER_SBS is not set # CONFIG_MANAGER_SBS is not set -CONFIG_BATTERY_BQ27XXX=y -CONFIG_BATTERY_BQ27XXX_I2C=y +CONFIG_BATTERY_BQ27XXX=m +CONFIG_BATTERY_BQ27XXX_I2C=m # CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set CONFIG_CHARGER_AXP20X=y CONFIG_BATTERY_AXP20X=y @@ -2794,6 +2815,7 @@ CONFIG_CHARGER_GPIO=y # CONFIG_CHARGER_BQ25890 is not set # CONFIG_CHARGER_BQ25980 is not set # CONFIG_CHARGER_BQ256XX is not set +# CONFIG_CHARGER_RK817 is not set # CONFIG_CHARGER_SMB347 is not set # CONFIG_BATTERY_GAUGE_LTC2941 is not set # CONFIG_BATTERY_GOLDFISH is not set @@ -2814,7 +2836,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_AD7314 is not set # CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set # CONFIG_SENSORS_ADM1025 is not set # CONFIG_SENSORS_ADM1026 is not set # CONFIG_SENSORS_ADM1029 is not set @@ -2833,7 +2854,6 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ASC7621 is not set # CONFIG_SENSORS_AXI_FAN_CONTROL is not set CONFIG_SENSORS_ARM_SCPI=y -# CONFIG_SENSORS_ASPEED is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_CORSAIR_CPRO is not set # CONFIG_SENSORS_CORSAIR_PSU is not set @@ -2874,10 +2894,10 @@ CONFIG_SENSORS_GPIO_FAN=y # CONFIG_SENSORS_MAX197 is not set # CONFIG_SENSORS_MAX31722 is not set # CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set # CONFIG_SENSORS_MAX6620 is not set # CONFIG_SENSORS_MAX6621 is not set # CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_MAX6697 is not set # CONFIG_SENSORS_MAX31790 is not set @@ -2917,7 +2937,6 @@ CONFIG_SENSORS_LM90=m # CONFIG_SENSORS_PCF8591 is not set # CONFIG_PMBUS is not set # CONFIG_SENSORS_PWM_FAN is not set -# CONFIG_SENSORS_RASPBERRYPI_HWMON is not set # CONFIG_SENSORS_SBTSI is not set # CONFIG_SENSORS_SBRMI is not set # CONFIG_SENSORS_SHT15 is not set @@ -2925,10 +2944,10 @@ CONFIG_SENSORS_LM90=m # CONFIG_SENSORS_SHT3x is not set # CONFIG_SENSORS_SHT4x is not set # CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SY7636A is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_EMC1403 is not set # CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set # CONFIG_SENSORS_EMC6W201 is not set # CONFIG_SENSORS_SMSC47M1 is not set # CONFIG_SENSORS_SMSC47M192 is not set @@ -2989,14 +3008,6 @@ CONFIG_THERMAL_EMULATION=y # CONFIG_THERMAL_MMIO is not set # CONFIG_MAX77620_THERMAL is not set CONFIG_ROCKCHIP_THERMAL=m - -# -# Broadcom thermal drivers -# -# CONFIG_BCM2711_THERMAL is not set -# CONFIG_BCM2835_THERMAL is not set -# end of Broadcom thermal drivers - # CONFIG_GENERIC_ADC_THERMAL is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y @@ -3025,7 +3036,6 @@ CONFIG_ARM_SP805_WATCHDOG=y # CONFIG_MAX63XX_WATCHDOG is not set # CONFIG_MAX77620_WATCHDOG is not set # CONFIG_ARM_SMC_WATCHDOG is not set -# CONFIG_BCM2835_WDT is not set # CONFIG_MEN_A21_WDT is not set # CONFIG_XEN_WDT is not set @@ -3088,22 +3098,25 @@ CONFIG_MFD_MAX77620=y # CONFIG_MFD_MAX8997 is not set # CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set # 
CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set # CONFIG_EZX_PCAP is not set # CONFIG_MFD_CPCAP is not set # CONFIG_MFD_VIPERBOARD is not set # CONFIG_MFD_NTXEC is not set # CONFIG_MFD_RETU is not set # CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set # CONFIG_MFD_RC5T583 is not set CONFIG_MFD_RK808=y # CONFIG_MFD_RN5T618 is not set CONFIG_MFD_SEC_CORE=y # CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SIMPLE_MFD_I2C is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set @@ -3222,7 +3235,6 @@ CONFIG_REGULATOR_RK808=y CONFIG_REGULATOR_S2MPS11=y # CONFIG_REGULATOR_S5M8767 is not set # CONFIG_REGULATOR_SLG51000 is not set -# CONFIG_REGULATOR_SY7636A is not set # CONFIG_REGULATOR_SY8106A is not set # CONFIG_REGULATOR_SY8824X is not set # CONFIG_REGULATOR_SY8827N is not set @@ -3307,7 +3319,6 @@ CONFIG_MEDIA_USB_SUPPORT=y # # Webcam devices # -# CONFIG_VIDEO_CPIA2 is not set CONFIG_USB_GSPCA=m # CONFIG_USB_GSPCA_BENQ is not set # CONFIG_USB_GSPCA_CONEX is not set @@ -3360,11 +3371,9 @@ CONFIG_USB_GSPCA=m # CONFIG_USB_STV06XX is not set # CONFIG_USB_PWC is not set # CONFIG_USB_S2255 is not set -# CONFIG_USB_STKWEBCAM is not set # CONFIG_VIDEO_USBTV is not set CONFIG_USB_VIDEO_CLASS=m CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y -# CONFIG_USB_ZR364XX is not set # # Webcam, TV (analog/digital) USB devices @@ -3457,6 +3466,12 @@ CONFIG_VIDEO_ROCKCHIP_ISP1=m # Texas Instruments drivers # +# +# Verisilicon media platform drivers +# +CONFIG_VIDEO_HANTRO=m +CONFIG_VIDEO_HANTRO_ROCKCHIP=y + # # VIA media platform drivers # @@ -3481,6 +3496,7 @@ CONFIG_VIDEOBUF2_DMA_SG=m # # Camera sensor devices # +# CONFIG_VIDEO_AR0521 is not set # CONFIG_VIDEO_HI556 is not set # CONFIG_VIDEO_HI846 is not set # CONFIG_VIDEO_HI847 is not set @@ -3556,7 +3572,7 @@ CONFIG_VIDEO_OV8858=m # # CONFIG_VIDEO_AD5820 is not set # CONFIG_VIDEO_AK7375 is not set -# CONFIG_VIDEO_DW9714 is not set +CONFIG_VIDEO_DW9714=m # CONFIG_VIDEO_DW9768 is not set # CONFIG_VIDEO_DW9807_VCM is not set # end of Lens drivers @@ -3681,6 +3697,7 @@ CONFIG_VIDEO_OV8858=m # # Graphics support # +CONFIG_APERTURE_HELPERS=y CONFIG_DRM=y CONFIG_DRM_MIPI_DSI=y # CONFIG_DRM_DEBUG_MM is not set @@ -3688,12 +3705,13 @@ CONFIG_DRM_KMS_HELPER=y CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_OVERALLOC=100 CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_BUS=y CONFIG_DRM_DISPLAY_HELPER=y CONFIG_DRM_DISPLAY_DP_HELPER=y CONFIG_DRM_DISPLAY_HDMI_HELPER=y # CONFIG_DRM_DP_AUX_CHARDEV is not set # CONFIG_DRM_DP_CEC is not set -CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_GEM_DMA_HELPER=y CONFIG_DRM_GEM_SHMEM_HELPER=y CONFIG_DRM_SCHED=y @@ -3710,7 +3728,7 @@ CONFIG_DRM_I2C_SIL164=m # ARM devices # # CONFIG_DRM_HDLCD is not set -CONFIG_DRM_MALI_DISPLAY=y +CONFIG_DRM_MALI_DISPLAY=m # CONFIG_DRM_KOMEDA is not set # end of ARM devices @@ -3730,7 +3748,7 @@ CONFIG_ROCKCHIP_RGB=y CONFIG_DRM_UDL=m # CONFIG_DRM_RCAR_DW_HDMI is not set # CONFIG_DRM_RCAR_USE_LVDS is not set -# CONFIG_DRM_RCAR_MIPI_DSI is not set +# CONFIG_DRM_RCAR_USE_MIPI_DSI is not set CONFIG_DRM_PANEL=y # @@ -3744,8 +3762,9 @@ CONFIG_DRM_PANEL=y # CONFIG_DRM_PANEL_BOE_TV101WUM_NL6 is not set # CONFIG_DRM_PANEL_DSI_CM is not set # CONFIG_DRM_PANEL_LVDS is not set -CONFIG_DRM_PANEL_SIMPLE=y -# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +CONFIG_DRM_PANEL_EDP=y +# CONFIG_DRM_PANEL_EBBG_FT8719 is not set # 
CONFIG_DRM_PANEL_ELIDA_KD35T133 is not set CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02=y CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D=y @@ -3842,6 +3861,7 @@ CONFIG_DRM_SIMPLE_BRIDGE=y # CONFIG_DRM_TOSHIBA_TC358767 is not set # CONFIG_DRM_TOSHIBA_TC358768 is not set # CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set # CONFIG_DRM_TI_TFP410 is not set # CONFIG_DRM_TI_SN65DSI83 is not set # CONFIG_DRM_TI_SN65DSI86 is not set @@ -3862,10 +3882,11 @@ CONFIG_DRM_DW_HDMI_I2S_AUDIO=m CONFIG_DRM_DW_MIPI_DSI=y # end of Display Interface Bridges -# CONFIG_DRM_VC4 is not set # CONFIG_DRM_ETNAVIV is not set # CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_LOGICVC is not set # CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_IMX_LCDIF is not set # CONFIG_DRM_ARCPGU is not set # CONFIG_DRM_GM12U320 is not set # CONFIG_DRM_PANEL_MIPI_DBI is not set @@ -3968,10 +3989,11 @@ CONFIG_DUMMY_CONSOLE=y CONFIG_DUMMY_CONSOLE_COLUMNS=80 CONFIG_DUMMY_CONSOLE_ROWS=25 CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y +CONFIG_BOOTSPLASH=y # end of Console display driver support CONFIG_LOGO=y @@ -4004,7 +4026,9 @@ CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_PROC_FS=y CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_CTL_FAST_LOOKUP=y # CONFIG_SND_DEBUG is not set +# CONFIG_SND_CTL_INPUT_VALIDATION is not set # CONFIG_SND_SEQUENCER is not set CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set @@ -4039,7 +4063,6 @@ CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y # CONFIG_SND_SOC_AMD_ACP is not set # CONFIG_SND_AMD_ACP_CONFIG is not set # CONFIG_SND_ATMEL_SOC is not set -# CONFIG_SND_BCM2835_SOC_I2S is not set # CONFIG_SND_BCM63XX_I2S_WHISTLER is not set # CONFIG_SND_DESIGNWARE_I2S is not set @@ -4127,6 +4150,7 @@ CONFIG_SND_SOC_BT_SCO=m # CONFIG_SND_SOC_CS42L52 is not set # CONFIG_SND_SOC_CS42L56 is not set # CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS42L83 is not set # CONFIG_SND_SOC_CS4234 is not set # CONFIG_SND_SOC_CS4265 is not set # CONFIG_SND_SOC_CS4270 is not set @@ -4146,10 +4170,12 @@ CONFIG_SND_SOC_HDMI_CODEC=m # CONFIG_SND_SOC_ES7134 is not set # CONFIG_SND_SOC_ES7241 is not set CONFIG_SND_SOC_ES8316=m +# CONFIG_SND_SOC_ES8326 is not set CONFIG_SND_SOC_ES8328=m CONFIG_SND_SOC_ES8328_I2C=m CONFIG_SND_SOC_ES8328_SPI=m # CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_HDA is not set # CONFIG_SND_SOC_ICS43432 is not set # CONFIG_SND_SOC_INNO_RK3036 is not set # CONFIG_SND_SOC_MAX98088 is not set @@ -4193,6 +4219,7 @@ CONFIG_SND_SOC_RT5645=m CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m # CONFIG_SND_SOC_SIMPLE_MUX is not set # CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SRC4XXX_I2C is not set # CONFIG_SND_SOC_SSM2305 is not set # CONFIG_SND_SOC_SSM2518 is not set # CONFIG_SND_SOC_SSM2602_SPI is not set @@ -4205,6 +4232,7 @@ CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m # CONFIG_SND_SOC_TAS2562 is not set # CONFIG_SND_SOC_TAS2764 is not set # CONFIG_SND_SOC_TAS2770 is not set +# CONFIG_SND_SOC_TAS2780 is not set # CONFIG_SND_SOC_TAS5086 is not set # CONFIG_SND_SOC_TAS571X is not set # CONFIG_SND_SOC_TAS5720 is not set @@ -4327,6 +4355,7 @@ CONFIG_HID_EZKEY=y # CONFIG_HID_UCLOGIC is not set # CONFIG_HID_WALTOP is not set # CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set # CONFIG_HID_XIAOMI is not set # CONFIG_HID_GYRATION is not set # CONFIG_HID_ICADE is not set @@ 
-4362,6 +4391,7 @@ CONFIG_HID_MULTITOUCH=y # CONFIG_HID_PETALYNX is not set # CONFIG_HID_PICOLCD is not set # CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PXRC is not set # CONFIG_HID_RAZER is not set # CONFIG_HID_PRIMAX is not set # CONFIG_HID_RETRODE is not set @@ -4380,6 +4410,7 @@ CONFIG_HID_MULTITOUCH=y # CONFIG_HID_SMARTJOYPLUS is not set # CONFIG_HID_TIVO is not set # CONFIG_HID_TOPSEED is not set +# CONFIG_HID_TOPRE is not set # CONFIG_HID_THINGM is not set # CONFIG_HID_THRUSTMASTER is not set # CONFIG_HID_UDRAW_PS3 is not set @@ -4406,6 +4437,7 @@ CONFIG_USB_HID=y # I2C HID support # # CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set # CONFIG_I2C_HID_OF_GOODIX is not set # end of I2C HID support # end of HID support @@ -4620,6 +4652,7 @@ CONFIG_USB_HSIC_USB3503=y # CONFIG_USB_HSIC_USB4604 is not set # CONFIG_USB_LINK_LAYER_TEST is not set # CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ONBOARD_HUB is not set # # USB Physical Layer drivers @@ -4735,7 +4768,9 @@ CONFIG_TYPEC_TCPCI=y CONFIG_TYPEC_FUSB302=y CONFIG_TYPEC_UCSI=y # CONFIG_UCSI_CCG is not set +# CONFIG_UCSI_STM32G0 is not set # CONFIG_TYPEC_TPS6598X is not set +# CONFIG_TYPEC_ANX7411 is not set # CONFIG_TYPEC_ANX7688 is not set # CONFIG_TYPEC_RT1719 is not set # CONFIG_TYPEC_HD3SS3220 is not set @@ -4776,13 +4811,11 @@ CONFIG_MMC_STM32_SDMMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_ARASAN=y -# CONFIG_MMC_SDHCI_OF_ASPEED is not set # CONFIG_MMC_SDHCI_OF_AT91 is not set # CONFIG_MMC_SDHCI_OF_DWCMSHC is not set CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC_SDHCI_F_SDH30=y # CONFIG_MMC_SDHCI_MILBEAUT is not set -# CONFIG_MMC_SDHCI_IPROC is not set CONFIG_MMC_SPI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_PLTFM=y @@ -4796,7 +4829,6 @@ CONFIG_MMC_DW_ROCKCHIP=y # CONFIG_MMC_USDHI6ROL0 is not set CONFIG_MMC_CQHCI=y # CONFIG_MMC_HSQ is not set -# CONFIG_MMC_BCM2835 is not set # CONFIG_MMC_MTK is not set CONFIG_MMC_SDHCI_XENON=y # CONFIG_MMC_SDHCI_OMAP is not set @@ -4941,6 +4973,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_HYM8563 is not set # CONFIG_RTC_DRV_MAX6900 is not set CONFIG_RTC_DRV_MAX77686=y +# CONFIG_RTC_DRV_NCT3018Y is not set CONFIG_RTC_DRV_RK808=y # CONFIG_RTC_DRV_RS5C372 is not set # CONFIG_RTC_DRV_ISL1208 is not set @@ -5040,7 +5073,6 @@ CONFIG_DMA_OF=y # CONFIG_ALTERA_MSGDMA is not set # CONFIG_AMBA_PL08X is not set CONFIG_BCM_SBA_RAID=m -# CONFIG_DMA_BCM2835 is not set # CONFIG_DW_AXI_DMAC is not set # CONFIG_FSL_EDMA is not set # CONFIG_FSL_QDMA is not set @@ -5149,12 +5181,6 @@ CONFIG_RTL8723BS=m # CONFIG_ADT7316 is not set # end of Analog digital bi-direction converters -# -# Capacitance to digital converters -# -# CONFIG_AD7746 is not set -# end of Capacitance to digital converters - # # Direct Digital Synthesis # @@ -5182,26 +5208,16 @@ CONFIG_RTL8723BS=m # end of IIO staging drivers CONFIG_STAGING_MEDIA=y -CONFIG_VIDEO_HANTRO=m -CONFIG_VIDEO_HANTRO_ROCKCHIP=y # CONFIG_VIDEO_MAX96712 is not set CONFIG_VIDEO_ROCKCHIP_VDEC=m +# CONFIG_STAGING_MEDIA_DEPRECATED is not set # CONFIG_STAGING_BOARD is not set # CONFIG_LTE_GDM724X is not set -# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set # CONFIG_FB_TFT is not set # CONFIG_KS7010 is not set -CONFIG_BCM_VIDEOCORE=y -# CONFIG_BCM2835_VCHIQ is not set -# CONFIG_SND_BCM2835 is not set -# CONFIG_VIDEO_BCM2835 is not set # CONFIG_PI433 is not set # CONFIG_XIL_AXIS_FIFO is not set # CONFIG_FIELDBUS_DEV is not set - -# -# VME Device Drivers -# # CONFIG_RTL8723CS is not set # CONFIG_GOLDFISH is not set CONFIG_CHROME_PLATFORMS=y @@ -5210,6 +5226,7 @@ 
CONFIG_CROS_EC=y # CONFIG_CROS_EC_RPMSG is not set # CONFIG_CROS_EC_SPI is not set CONFIG_CROS_EC_PROTO=y +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set CONFIG_CROS_EC_CHARDEV=y CONFIG_CROS_EC_LIGHTBAR=y CONFIG_CROS_EC_VBC=y @@ -5251,10 +5268,8 @@ CONFIG_COMMON_CLK_XGENE=y CONFIG_COMMON_CLK_PWM=y # CONFIG_COMMON_CLK_RS9_PCIE is not set # CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set # CONFIG_COMMON_CLK_FIXED_MMIO is not set -CONFIG_CLK_BCM2711_DVP=y -CONFIG_CLK_BCM2835=y -# CONFIG_CLK_RASPBERRYPI is not set CONFIG_COMMON_CLK_ROCKCHIP=y CONFIG_CLK_PX30=y CONFIG_CLK_RK3308=y @@ -5263,6 +5278,7 @@ CONFIG_CLK_RK3368=y CONFIG_CLK_RK3399=y CONFIG_CLK_RK3568=y # CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set CONFIG_HWSPINLOCK=y # @@ -5278,7 +5294,6 @@ CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y CONFIG_FSL_ERRATUM_A008585=y CONFIG_HISILICON_ERRATUM_161010101=y CONFIG_ARM64_ERRATUM_858921=y -CONFIG_ARM_TIMER_SP804=y # CONFIG_MICROCHIP_PIT64B is not set # end of Clock Source drivers @@ -5289,7 +5304,6 @@ CONFIG_PLATFORM_MHU=y # CONFIG_PL320_MBOX is not set CONFIG_ROCKCHIP_MBOX=y # CONFIG_ALTERA_MBOX is not set -CONFIG_BCM2835_MBOX=y # CONFIG_MAILBOX_TEST is not set CONFIG_IOMMU_IOVA=y CONFIG_IOMMU_API=y @@ -5303,6 +5317,7 @@ CONFIG_IOMMU_IO_PGTABLE_LPAE=y # CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y # CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set # end of Generic IOMMU Pagetable Support # CONFIG_IOMMU_DEBUGFS is not set @@ -5350,8 +5365,6 @@ CONFIG_RPMSG_QCOM_GLINK_RPM=y # # Broadcom SoC drivers # -CONFIG_BCM2835_POWER=y -# CONFIG_RASPBERRYPI_POWER is not set CONFIG_SOC_BRCMSTB=y # end of Broadcom SoC drivers @@ -5362,6 +5375,11 @@ CONFIG_SOC_BRCMSTB=y # CONFIG_FSL_RCPM is not set # end of NXP/Freescale QorIQ SoC drivers +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + # # i.MX SoC drivers # @@ -5478,6 +5496,7 @@ CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m # CONFIG_MMA8452 is not set # CONFIG_MMA9551 is not set # CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set # CONFIG_MXC4005 is not set # CONFIG_MXC6255 is not set # CONFIG_SCA3000 is not set @@ -5524,6 +5543,7 @@ CONFIG_AXP20X_ADC=y # CONFIG_MAX1027 is not set # CONFIG_MAX11100 is not set # CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set # CONFIG_MAX1241 is not set # CONFIG_MAX1363 is not set # CONFIG_MAX9611 is not set @@ -5535,6 +5555,7 @@ CONFIG_AXP20X_ADC=y # CONFIG_QCOM_SPMI_VADC is not set # CONFIG_QCOM_SPMI_ADC5 is not set CONFIG_ROCKCHIP_SARADC=y +# CONFIG_RICHTEK_RTQ6056 is not set # CONFIG_SD_ADC_MODULATOR is not set # CONFIG_TI_ADC081C is not set # CONFIG_TI_ADC0832 is not set @@ -5579,6 +5600,7 @@ CONFIG_ROCKCHIP_SARADC=y # Capacitance to digital converters # # CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set # end of Capacitance to digital converters # @@ -5750,6 +5772,8 @@ CONFIG_IIO_ST_SENSORS_CORE=m # CONFIG_ADIS16480 is not set # CONFIG_BMI160_I2C is not set # CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_SERIAL is not set +# CONFIG_BOSCH_BNO055_I2C is not set # CONFIG_FXOS8700_I2C is not set # CONFIG_FXOS8700_SPI is not set # CONFIG_KMX61 is not set @@ -5787,6 +5811,7 @@ CONFIG_INV_MPU6050_I2C=m # CONFIG_JSA1212 is not set # CONFIG_RPR0521 is not set # CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set # CONFIG_LV0104CS is not set # CONFIG_MAX44000 is not set # CONFIG_MAX44009 is not set @@ -5808,20 +5833,21 @@ CONFIG_STK3310=m # CONFIG_VCNL4000 is not set # CONFIG_VCNL4035 is 
not set # CONFIG_VEML6030 is not set -# CONFIG_VEML6070 is not set -# CONFIG_VL6180 is not set -# CONFIG_ZOPT2201 is not set +CONFIG_VEML6070=m +CONFIG_VL6180=m +CONFIG_ZOPT2201=m # end of Light sensors # # Magnetometer sensors # -# CONFIG_AF8133J is not set +CONFIG_AF8133J=m # CONFIG_AK8974 is not set -# CONFIG_AK8975 is not set -# CONFIG_AK09911 is not set -# CONFIG_BMC150_MAGN_I2C is not set -# CONFIG_BMC150_MAGN_SPI is not set +CONFIG_AK8975=m +CONFIG_AK09911=m +CONFIG_BMC150_MAGN=m +CONFIG_BMC150_MAGN_I2C=m +CONFIG_BMC150_MAGN_SPI=m # CONFIG_MAG3110 is not set # CONFIG_MMC35240 is not set CONFIG_IIO_ST_MAGN_3AXIS=m @@ -5952,11 +5978,10 @@ CONFIG_IIO_ST_PRESS_SPI=m CONFIG_PWM=y CONFIG_PWM_SYSFS=y # CONFIG_PWM_ATMEL_TCB is not set -# CONFIG_PWM_BCM2835 is not set +# CONFIG_PWM_CLK is not set # CONFIG_PWM_CROS_EC is not set # CONFIG_PWM_FSL_FTM is not set # CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_RASPBERRYPI_POE is not set CONFIG_PWM_ROCKCHIP=y # CONFIG_PWM_XILINX is not set @@ -5969,7 +5994,6 @@ CONFIG_ARM_GIC_MAX_NR=1 CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y # CONFIG_AL_FIC is not set -CONFIG_BRCMSTB_L2_IRQ=y # CONFIG_XILINX_INTC is not set CONFIG_PARTITION_PERCPU=y # end of IRQ chip support @@ -5977,9 +6001,8 @@ CONFIG_PARTITION_PERCPU=y # CONFIG_IPACK_BUS is not set CONFIG_ARCH_HAS_RESET_CONTROLLER=y CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_RASPBERRYPI is not set -CONFIG_RESET_SIMPLE=y # CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set # # PHY Subsystem @@ -6017,6 +6040,7 @@ CONFIG_PHY_ROCKCHIP_INNO_CSIDPHY=y CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY=y # CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY is not set CONFIG_PHY_ROCKCHIP_PCIE=y +# CONFIG_PHY_ROCKCHIP_SNPS_PCIE3 is not set CONFIG_PHY_ROCKCHIP_TYPEC=y CONFIG_PHY_ROCKCHIP_USB=m # CONFIG_PHY_SAMSUNG_USB2 is not set @@ -6042,7 +6066,6 @@ CONFIG_RAS=y # # Android # -CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_BINDERFS=y CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" @@ -6053,10 +6076,11 @@ CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" # CONFIG_DAX is not set CONFIG_NVMEM=y CONFIG_NVMEM_SYSFS=y -# CONFIG_NVMEM_SPMI_SDAM is not set -CONFIG_ROCKCHIP_EFUSE=y -CONFIG_ROCKCHIP_OTP=m # CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_ROCKCHIP_EFUSE is not set +# CONFIG_NVMEM_ROCKCHIP_OTP is not set +# CONFIG_NVMEM_SPMI_SDAM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set # # HW tracing support @@ -6421,6 +6445,7 @@ CONFIG_LSM="yama,loadpin,integrity,selinux,smack,tomoyo,apparmor" # Memory initialization # CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y CONFIG_INIT_STACK_NONE=y # CONFIG_INIT_STACK_ALL_PATTERN is not set @@ -6473,6 +6498,8 @@ CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_CRYPTD=y CONFIG_CRYPTO_AUTHENC=y # CONFIG_CRYPTO_TEST is not set +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper # # Public-key cryptography @@ -6486,55 +6513,71 @@ CONFIG_CRYPTO_ECDH=m # CONFIG_CRYPTO_ECRDSA is not set # CONFIG_CRYPTO_SM2 is not set # CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography # -# Authenticated Encryption with Associated Data +# Block ciphers # -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_GCM=y -# CONFIG_CRYPTO_CHACHA20POLY1305 is not set -# CONFIG_CRYPTO_AEGIS128 is not set -CONFIG_CRYPTO_SEQIV=m -CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARIA is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is 
not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=m +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SM4_GENERIC is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set +# end of Block ciphers # -# Block modes +# Length-preserving ciphers and modes # +# CONFIG_CRYPTO_ADIANTUM is not set +# CONFIG_CRYPTO_ARC4 is not set +CONFIG_CRYPTO_CHACHA20=m CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_CFB is not set CONFIG_CRYPTO_CTR=y # CONFIG_CRYPTO_CTS is not set CONFIG_CRYPTO_ECB=m +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set # CONFIG_CRYPTO_LRW is not set # CONFIG_CRYPTO_OFB is not set # CONFIG_CRYPTO_PCBC is not set # CONFIG_CRYPTO_XTS is not set -# CONFIG_CRYPTO_KEYWRAP is not set -# CONFIG_CRYPTO_ADIANTUM is not set +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ESSIV=y +# end of AEAD (authenticated encryption with associated data) ciphers # -# Hash modes +# Hashes, digests, and MACs # -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_HMAC=y -# CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_VMAC is not set - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_XXHASH=m CONFIG_CRYPTO_BLAKE2B=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=y +CONFIG_CRYPTO_CMAC=y CONFIG_CRYPTO_GHASH=y -# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MD5=m # CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_POLY1305 is not set # CONFIG_CRYPTO_RMD160 is not set CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_SHA256=y @@ -6543,28 +6586,20 @@ CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m # CONFIG_CRYPTO_SM3_GENERIC is not set # CONFIG_CRYPTO_STREEBOG is not set +# CONFIG_CRYPTO_VMAC is not set # CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_XCBC is not set +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs # -# Ciphers +# CRCs (cyclic redundancy checks) # -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -CONFIG_CRYPTO_DES=m -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -CONFIG_CRYPTO_CHACHA20=m -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_SM4_GENERIC is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_TWOFISH is not set +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) # # Compression @@ -6575,9 +6610,10 @@ CONFIG_CRYPTO_LZO=y # CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set +# end of Compression # -# Random Number Generation +# Random number generation # CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DRBG_MENU=y @@ -6587,6 +6623,11 @@ CONFIG_CRYPTO_DRBG_HMAC=y CONFIG_CRYPTO_DRBG=y CONFIG_CRYPTO_JITTERENTROPY=y CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# CONFIG_CRYPTO_USER_API=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_USER_API_SKCIPHER=y 
@@ -6595,7 +6636,38 @@ CONFIG_CRYPTO_USER_API_RNG=y
 CONFIG_CRYPTO_USER_API_AEAD=y
 CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
 # CONFIG_CRYPTO_STATS is not set
+# end of Userspace interface
+
 CONFIG_CRYPTO_HASH_INFO=y
+# CONFIG_CRYPTO_NHPOLY1305_NEON is not set
+CONFIG_CRYPTO_CHACHA20_NEON=m
+
+#
+# Accelerated Cryptographic Algorithms for CPU (arm64)
+#
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_POLY1305_NEON=m
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA256_ARM64=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64=m
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+# CONFIG_CRYPTO_SM3_NEON is not set
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set
+CONFIG_CRYPTO_AES_ARM64=y
+CONFIG_CRYPTO_AES_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+# CONFIG_CRYPTO_SM4_ARM64_CE is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set
+# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+# end of Accelerated Cryptographic Algorithms for CPU (arm64)
+
 CONFIG_CRYPTO_HW=y
 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
 # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
@@ -6650,6 +6722,7 @@ CONFIG_INDIRECT_PIO=y
 #
 # Crypto library routines
 #
+CONFIG_CRYPTO_LIB_UTILS=y
 CONFIG_CRYPTO_LIB_AES=y
 CONFIG_CRYPTO_LIB_ARC4=m
 CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
@@ -6663,10 +6736,10 @@ CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9
 CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
 CONFIG_CRYPTO_LIB_POLY1305=m
 CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
+CONFIG_CRYPTO_LIB_SHA1=y
 CONFIG_CRYPTO_LIB_SHA256=y
 # end of Crypto library routines
 
-CONFIG_LIB_MEMNEQ=y
 CONFIG_CRC_CCITT=y
 CONFIG_CRC16=y
 CONFIG_CRC_T10DIF=y
@@ -6693,6 +6766,7 @@ CONFIG_LZO_DECOMPRESS=y
 CONFIG_LZ4_COMPRESS=m
 CONFIG_LZ4HC_COMPRESS=m
 CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMMON=y
 CONFIG_ZSTD_COMPRESS=m
 CONFIG_ZSTD_DECOMPRESS=y
 CONFIG_XZ_DEC=y
@@ -6748,6 +6822,7 @@ CONFIG_CMA_ALIGNMENT=8
 # CONFIG_DMA_API_DEBUG is not set
 # CONFIG_DMA_MAP_BENCHMARK is not set
 CONFIG_SGL_ALLOC=y
+# CONFIG_FORCE_NR_CPUS is not set
 CONFIG_CPU_RMAP=y
 CONFIG_DQL=y
 CONFIG_GLOB=y
@@ -6762,16 +6837,27 @@ CONFIG_HAVE_GENERIC_VDSO=y
 CONFIG_GENERIC_GETTIMEOFDAY=y
 CONFIG_GENERIC_VDSO_TIME_NS=y
 CONFIG_FONT_SUPPORT=y
-# CONFIG_FONTS is not set
+CONFIG_FONTS=y
 CONFIG_FONT_8x8=y
 CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_6x10 is not set
+# CONFIG_FONT_10x18 is not set
+# CONFIG_FONT_SUN8x16 is not set
+CONFIG_FONT_SUN12x22=y
+CONFIG_FONT_TER16x32=y
+# CONFIG_FONT_6x8 is not set
 CONFIG_SG_POOL=y
 CONFIG_ARCH_STACKWALK=y
 CONFIG_STACKDEPOT=y
-CONFIG_STACK_HASH_ORDER=20
 CONFIG_SBITMAP=y
 # end of Library routines
 
+CONFIG_GENERIC_IOREMAP=y
 CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
 
 #
@@ -6798,6 +6884,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
 #
 # Compile-time checks and compiler options
 #
+CONFIG_AS_HAS_NON_CONST_LEB128=y
 CONFIG_FRAME_WARN=2048
 # CONFIG_STRIP_ASM_SYMS is not set
 # CONFIG_HEADERS_INSTALL is not set
@@ -6841,6 +6928,7 @@ CONFIG_SLUB_DEBUG=y
 CONFIG_ARCH_HAS_DEBUG_WX=y
 # CONFIG_DEBUG_WX is not set
 CONFIG_GENERIC_PTDUMP=y
+# CONFIG_SHRINKER_DEBUG is not set
 CONFIG_HAVE_DEBUG_KMEMLEAK=y
 CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
 # CONFIG_DEBUG_VM_PGTABLE is not set
@@ -6943,6 +7031,7 @@ CONFIG_RUNTIME_TESTING_MENU=y
 # CONFIG_TEST_BITMAP is not set
 # CONFIG_TEST_UUID is not set
 # CONFIG_TEST_XARRAY is not set
+# CONFIG_TEST_MAPLE_TREE is not set
 # CONFIG_TEST_RHASHTABLE is not set
 # CONFIG_TEST_SIPHASH is not set
 # CONFIG_TEST_IDA is not set
@@ -6964,6 +7053,11 @@ CONFIG_RUNTIME_TESTING_MENU=y
 CONFIG_ARCH_USE_MEMTEST=y
 CONFIG_MEMTEST=y
 # end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
 # end of Kernel hacking
 
 #
diff --git a/sys-kernel/pinephone-sources/pinephone-sources-5.19.12.ebuild b/sys-kernel/pinephone-sources/pinephone-sources-6.1.4.ebuild
similarity index 69%
rename from sys-kernel/pinephone-sources/pinephone-sources-5.19.12.ebuild
rename to sys-kernel/pinephone-sources/pinephone-sources-6.1.4.ebuild
index 5a96132..8661dac 100644
--- a/sys-kernel/pinephone-sources/pinephone-sources-5.19.12.ebuild
+++ b/sys-kernel/pinephone-sources/pinephone-sources-6.1.4.ebuild
@@ -17,41 +17,33 @@ DEPEND="${RDEPEND}
 
 DESCRIPTION="Full sources for the Linux kernel, with megi's patch for pinephone and gentoo patchset"
 
-MEGI_TAG="orange-pi-5.19-20220909-1622"
+MEGI_TAG="orange-pi-6.1-20230104-1712"
 SRC_URI="https://github.com/megous/linux/archive/${MEGI_TAG}.tar.gz"
 
 PATCHES=(
-	#Patch kernel
-	${FILESDIR}/5.19.8-9.patch
-	${FILESDIR}/5.19.9-10.patch
-	${FILESDIR}/5.19.10-11.patch
-	${FILESDIR}/5.19.11-12.patch
-
+	#Kernel patch
+	${FILESDIR}/1003_linux-6.1.4.patch
+
 	#Gentoo Patches
 	${FILESDIR}/1500_XATTR_USER_PREFIX.patch
-	${FILESDIR}/1510_fs-enable-link-security-restrictions-by-default.patch
 	${FILESDIR}/1700_sparc-address-warray-bound-warnings.patch
 	${FILESDIR}/2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 	${FILESDIR}/2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
+	${FILESDIR}/2910_bfp-mark-get-entry-ip-as--maybe-unused.patch
 	${FILESDIR}/2920_sign-file-patch-for-libressl.patch
 	${FILESDIR}/3000_Support-printing-firmware-info.patch
 	${FILESDIR}/4567_distro-Gentoo-Kconfig.patch
 	${FILESDIR}/5010_enable-cpu-optimizations-universal.patch
-	${FILESDIR}/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch
-	${FILESDIR}/5021_BMQ-and-PDS-gentoo-defaults.patch
+	${FILESDIR}/5020_BMQ-and-PDS-io-scheduler-v6.1-r0.patch
+	${FILESDIR}/5021_sched-alt-missing-rq-lock-irq-function.patch
 
 	# Drop Megi's Modem-Power
 	${FILESDIR}/0101-arm64-dts-pinephone-drop-modem-power-node.patch
 	${FILESDIR}/0102-arm64-dts-pinephone-pro-remove-modem-node.patch
 
-	#PinePhone(Pro) Patches
-	${FILESDIR}/0103-arm64-dts-rk3399-pinephone-pro-add-modem-RI-pin.patch
-	${FILESDIR}/0103-ccu-sun50i-a64-reparent-clocks-to-lower-speed-oscillator.patch
-	${FILESDIR}/0104-PPP-Add-reset-resume-to-usb_wwan.patch
-	${FILESDIR}/0104-quirk-kernel-org-bug-210681-firmware_rome_error.patch
-	${FILESDIR}/0105-leds-gpio-make-max_brightness-configurable.patch
-	${FILESDIR}/0106-panic-led.patch
-	${FILESDIR}/0106-sound-rockchip-i2s-Dont-disable-mclk-on-suspend.patch
+	# PinePhonePro
+	${FILESDIR}/0103-arm64-dts-rk3399-pinephone-pro-add-modem-RI-pin.patch
+
 )
 
 S="${WORKDIR}/linux-${MEGI_TAG}"