diff --git a/sys-firmware/pinephone-firmware/Manifest b/sys-firmware/pinephone-firmware/Manifest
deleted file mode 100644
index c72116b..0000000
--- a/sys-firmware/pinephone-firmware/Manifest
+++ /dev/null
@@ -1,3 +0,0 @@
-DIST anx7688-fw.bin 220344 BLAKE2B ff4f3aa6a01c77c687edc4a8dcef8f32754838354497c5b92ec54564b95ed02e737b411d24d14b79c49b186271536e22b1ef6a65f7eda21ed2b55d4500a3bf6a SHA512 d17155a48ce0916d788f763e5a46e1a3a069970f3ec7d3404bc56a7da89d423682050ba7bf4993b222ace609202bdbddf42a0b4506c4193b03bc324c8cfe1be4
-DIST ov5640_af.bin 30768 BLAKE2B cfac7640de622e59cdae83d63ce853805ced6801c007ac124cd23e368fb3a0faae5dec6509b2b3cc6e5fc7fe607a547f1fb68f4c4e33b4f5f8014edd66d25970 SHA512 e5d6493c4b9714ec1b96d07efdfcf14b32aecdc1778993d97e71d7d9458c709211d0673788302a336b99a8de7287efe4df5ccd3bf617608efdf3d326d3b16153
-DIST rtl8723cs_xx-fw.bin 169150 BLAKE2B 9a99b21dca6b49acd90dd54cb7146c501f1a4dfc8246587383d5115d1b9400a271888a4cc3129573abce491a9980b1a0b30cc7f4b7ccccdf6866a399238f4aee SHA512 f77fef9729d9f2971458fda84d44cb7bc2c500eda0155f5f00cca7bc2bc85a17671255abdd000b505f82130fcfa99f3be963539f488b72cc7dd06eca46285878
diff --git a/sys-firmware/pinephone-firmware/metadata.xml b/sys-firmware/pinephone-firmware/metadata.xml
deleted file mode 100644
index 4fdf310..0000000
--- a/sys-firmware/pinephone-firmware/metadata.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-	<maintainer type="person">
-		<email>gjdijkman@gjdwebserver.nl</email>
-		<name>Gerben Jan Dijkman</name>
-	</maintainer>
-</pkgmetadata>
diff --git a/sys-firmware/pinephone-firmware/pinephone-firmware-20210809.ebuild b/sys-firmware/pinephone-firmware/pinephone-firmware-20210809.ebuild
deleted file mode 100644
index 6ab6556..0000000
--- a/sys-firmware/pinephone-firmware/pinephone-firmware-20210809.ebuild
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 1999-2021 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=7
-
-DESCRIPTION="Firmwares files for PinePhone"
-HOMEPAGE="https://xff.cz/git/linux-firmware"
-SRC_URI="https://xff.cz/git/linux-firmware/tree/ov5640_af.bin?id=4ec2645b007ba4c3f2962e38b50c06f274abbf7c -> ov5640_af.bin
-https://xff.cz/git/linux-firmware/tree/anx7688-fw.bin?id=4ec2645b007ba4c3f2962e38b50c06f274abbf7c -> anx7688-fw.bin
-https://xff.cz/git/linux-firmware/tree/rtl_bt/rtl8723cs_xx_fw.bin?id=4ec2645b007ba4c3f2962e38b50c06f274abbf7c -> rtl8723cs_xx-fw.bin
-"
-
-LICENSE="linux-fw-redistributable no-source-code"
-SLOT="0"
-KEYWORDS="~amd64 ~arm64"
-
-S="${WORKDIR}"
-
-src_install() {
- mkdir -p "${D}"/lib/firmware/ || die
- mkdir -p "${D}"/lib/firmware/rtl_bt || die
- insinto /lib/firmware/
- doins "${DISTDIR}"/anx7688-fw.bin
- doins "${DISTDIR}"/ov5640_af.bin
- insinto /lib/firmware/rtl_bt
- doins "${DISTDIR}"/rtl8723cs_xx-fw.bin
-}
diff --git a/sys-kernel/pinephone-sources/Manifest b/sys-kernel/pinephone-sources/Manifest
index e04795f..bf5dda8 100644
--- a/sys-kernel/pinephone-sources/Manifest
+++ b/sys-kernel/pinephone-sources/Manifest
@@ -1,5 +1,17 @@
-DIST all-5.13.5.patch 15071574 BLAKE2B f0b44888b216a60bb12a920a170ffb8ee705e357b82b0cacd58551e2d0e257c0f4419c34976263dc062335bb37f4b3a7418f3d9674e601fd8adda88bacad97d6 SHA512 046f42a5c8fe6477cdda82f47a07093ea51cf26b231b1c58230885954b7ecab9faa9eb72ac3c0cb1603dd6ca2b5b0d76421de6d2c3c05a0bee3ca6e080bfa084
-DIST all-5.15.0.patch 15521358 BLAKE2B 3979612d6c0b949cd1cc3a915a590d91ade30913b3daf9bbf3794a58c2139abf46e407ddee6641308d0265ebbeb675b051697a3cc18ec7b028cd7da9e06748b6 SHA512 3bcb9774c126fb704c028775856c5ba92760c313ade8301e99e46fed10a0e3848b5982d4ba4d989ed7f4a29a2e2d9c679105ea108719e87f18307e6f636a3a3c
+DIST all-5.11.18.patch 15061000 BLAKE2B aea03c5307da5417db752879318f64f9fedd9a4113d96c466c45f6a85fd357b1dcf57403dd16b849654a0ddabbf5305e45d842d5b8953034a084c69f822d97cd SHA512 ce2bf9bb95d5e0392839a4a46d3cd9e148d110f2c498afc2c285b7f61868c0a7da4e652476004362e3bb80eb943968df00112aa3d27ae0c3fb7d9977d30babc1
+DIST all-5.12.17.patch 15073524 BLAKE2B bd085b619a3bc1190cbfdc6bfd74cd6606f67f55106f89e0522619aaf69b672d68a7e93d843d7a4680529532e654d71becf4a7378d453fcc72978ee24d415b57 SHA512 e4bae86407767b685b3ec47f383d2db44281fe6a58914e4a117449c2ff96fe3c6341a7b59d1a92790c1565ecc3c14dacbbdc96fbc1b718ec7402c27c2dc37d0d
+DIST all-5.13.13.patch 15071574 BLAKE2B 539e30aac6e465c7e402fdd2d705fe187411f2a92ca84e8dfe1407d0913a5b137a26134cb3364fb4c0df55aaf233ba7ec39eb2b258b8af7926c37530a4dbab07 SHA512 63b7a2845eb8e5387d5085403fa8051240d7760652790133f73d491fed061ee213d5ceb6f3a2e77d93d2caebd205545c3fd96bc85d8c1b5fcb19b8ae622e2b66
+DIST all-5.14.17.patch 15283595 BLAKE2B d4e8635727bde7c61a13261be96fc2b0d4bf6acd48f6756d79e6caf193a0f05e6a5fc298933ca15906640cb371c79e35103436ae16590d9336f8ebd81b9b7c8e SHA512 ab87a6ec0fca1357cef3c84b7e0a180303eec968661143928c522aff3771e21c474aa568fef1d276c69ce0232e349c9b6732173517e8aab43c43db04cb0e96df
+DIST all-5.15.1.patch 15521358 BLAKE2B 3979612d6c0b949cd1cc3a915a590d91ade30913b3daf9bbf3794a58c2139abf46e407ddee6641308d0265ebbeb675b051697a3cc18ec7b028cd7da9e06748b6 SHA512 3bcb9774c126fb704c028775856c5ba92760c313ade8301e99e46fed10a0e3848b5982d4ba4d989ed7f4a29a2e2d9c679105ea108719e87f18307e6f636a3a3c
+DIST all-5.15.2.patch 15521358 BLAKE2B 3979612d6c0b949cd1cc3a915a590d91ade30913b3daf9bbf3794a58c2139abf46e407ddee6641308d0265ebbeb675b051697a3cc18ec7b028cd7da9e06748b6 SHA512 3bcb9774c126fb704c028775856c5ba92760c313ade8301e99e46fed10a0e3848b5982d4ba4d989ed7f4a29a2e2d9c679105ea108719e87f18307e6f636a3a3c
+DIST linux-5.11.tar.xz 117619104 BLAKE2B 81300c27bd5476387a83123aaeb4163c73eb61e9245806c23660cb5e6a4fa88ffc9def027031335fa0270fc4080506cd415990014364e3a98b9d2e8c58a29524 SHA512 a567ec133018bb5ec00c60281479b466c26e02137a93a9c690e83997947df02b6fd94e76e8df748f6d70ceb58a19bacc3b1467de10b7a1fad2763db32b3f1330
+DIST linux-5.12.tar.xz 118112412 BLAKE2B 842d921b9a73d2aaade763dbd2ec67bdfe0275baa6d628b775f5c87574ad7dc86f0419afcd48c10c1235f4bffa16084243f2cf4556e6afcd391e975fe8ba530b SHA512 be03b6fee1d1ea8087b09874d27c0a602c0b04fd90ad38b975bd2c8455a07e83c29b56814aaf1389e82305fae0e4c2d1701075a7f0a7295dd28149f967ec5b3d
DIST linux-5.13.tar.xz 119297284 BLAKE2B 9c4c12e2394dec064adff51f7ccdf389192eb27ba7906db5eda543afe3d04afca6b9ea0848a057571bf2534eeb98e1e3a67734deff82c0d3731be205ad995668 SHA512 a8edf97e9d38a49f1be2bde1e29ad96274bb2c6f7e8a2bebaa1161dd4df9cabcbaec4ff644c45bee94f86ae47725087d6deed0cd954209cec717621d137db85e
+DIST linux-5.14.tar.xz 120669872 BLAKE2B 0047f5aaa3940dff97f4055ef544faafbbb5282128e6afe21d2f47d8dc8c395806a17016febfa050117d16f59e74b882cb8b9c5011d68f119c230d0a4d120524 SHA512 8e4f3ec3d36f774280f75dc7b004a43e09417af58f12e9c9f8348976659d4cfda7ad905f306f43fed66a27922e5c45db22e46bbfa7a0b9f365012380de3b6f64
DIST linux-5.15.tar.xz 121913744 BLAKE2B 3921274b23f7938abdf3ed9334534b4581e13d7484303d3a5280eddb038999aaa8b836666a487472d9c4a219af0f06b9fecccaf348fb5510ab8762f4ef4b7e83 SHA512 d25ad40b5bcd6a4c6042fd0fd84e196e7a58024734c3e9a484fd0d5d54a0c1d87db8a3c784eff55e43b6f021709dc685eb0efa18d2aec327e4f88a79f405705a
-DIST patch-5.13.5.xz 473120 BLAKE2B a0dd9f3f972a16de87f0d2d8daa7f5d35b27314d22597a28f471cdbe6cedfa7d4bf69e41504d6a9b9d4c1f085146604394747771185dd0a09276cfd92820b4a8 SHA512 1e4eb575775ccbc2e88b34b902a75562e49d6dfb4699dadd5b41fff9db8c2bc994d946d1e60f6320f48ef233aa721d3725582d4ec57458f2293da9a85806c7b1
+DIST patch-5.11.18.xz 651148 BLAKE2B d09300b9e6da7fe55ebdd5f28179671158e6f50ed96924a74294f3c2fbdc1a385b9ef18d4894be0daaa3bd55738082d5101b35823e9ccf486d46a7be8c39abb6 SHA512 a987b45276d7c78df4ed2414b1b13db9f87b0dc958fd30087b9580eb6f6bf0e8b2fdcdfe1569014cc785582e01b5680c907628c39b92190fd4f2fcbb44ee5cac
+DIST patch-5.12.17.xz 889940 BLAKE2B 78c10887c9c28261f5a99c3ed19c9390d8af92ee49dd4dd13092493e71c446fa58b9025eecb3ea8f605f1c3644c5ef85a25c056a4fbe143eb91f49eb9b2b866b SHA512 1c42b4efa3d26fb4bdc96c422a32646b2d9018cb84d231ccae4c64e1cdd342ed43828c6da3eb40cbe80550734254b8e824cec98cb1635d47baef75bae37bfd31
+DIST patch-5.13.13.xz 688948 BLAKE2B a4cb958a472a53fddbd7e82117cb929acc614c5d50c08a4f2de7a209bfb2da9a5978e551694afa363e3f521f467b9d5f1cbbc77832df86bbcd624e8490ff7bea SHA512 5d311d74c30da6fcd8057a372fb467bf646888ab9c6d36144c1f0a68c2910b1e1315dc44ad1a7b7a8c82d03e109483e72d454f7a217e64a27b5df9124204be77
+DIST patch-5.14.17.xz 606140 BLAKE2B dec4a45cbd9232860f91797559a736a33f1f448ebeba6edd7282e01f9e38c412e740375d25acd44bced992b04316d2ec67eea4bf3711e762b1d06fde32db6b7c SHA512 841978b7ced5c314593999f31200c89da14dbd4f61c7f2c2edc4a5ac1b296d5ef932710a7f5c767d14fdcae6f1566b7eb92dfbdf486c924b648ab898d877abae
+DIST patch-5.15.1.xz 5736 BLAKE2B 0f45917dd16d683c1b4a316c7205f24e4d037477a0fe54a153a3d05d5ff390585d8e35e8588359a0a3745b08daa1752701e33b5986dc3445c9598270e5635db2 SHA512 8f7faca2e0e5c755b052b65b2aa46fee0317c915ec2475e97371d5da5e3adfa943397108fa40ebbac581254933f4ffdc6875306120c8c208561556fdd8bfb4ff
+DIST patch-5.15.2.xz 17244 BLAKE2B 769ef83b6613d865b420d048c25ac1df4c2f88f7ae580b373f874d312720bad877e561756943c9833535a94e5621922bba24cb1b804a1540f2e67cfa23f1a1aa SHA512 5f0123bdc7c9875e7b3f02a89496a8a1e0808d77dc58fb725e250d93d69510a1ef6462cfb38cb38e78e20ca34fd7446f58327cad5e67fc68ec36d15777048edf
diff --git a/sys-kernel/pinephone-sources/files/0001-arm64-dts-allwinner-pinephone-stop-LEDs-on-suspend.patch b/sys-kernel/pinephone-sources/files/0001-arm64-dts-allwinner-pinephone-stop-LEDs-on-suspend.patch
new file mode 100644
index 0000000..331ca3e
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0001-arm64-dts-allwinner-pinephone-stop-LEDs-on-suspend.patch
@@ -0,0 +1,40 @@
+From 7045054c96224ead00aae09246f475dfe6202def Mon Sep 17 00:00:00 2001
+From: Danct12 <danct12@disroot.org>
+Date: Tue, 19 Jan 2021 10:09:01 +0700
+Subject: [PATCH] arm64: dts: allwinner: pinephone: stop LEDs on suspend
+
+Signed-off-by: Danct12 <danct12@disroot.org>
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+index 02d82980c..00ed866ae 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+@@ -218,14 +218,12 @@
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pio 3 20 GPIO_ACTIVE_HIGH>; /* PD20 */
+- retain-state-suspended;
+ };
+
+ green {
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pio 3 18 GPIO_ACTIVE_HIGH>; /* PD18 */
+- retain-state-suspended;
+ };
+
+ red {
+@@ -233,7 +231,6 @@
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */
+- retain-state-suspended;
+ };
+ };
+
+--
+2.30.0
+
diff --git a/sys-kernel/pinephone-sources/files/0001-bootsplash.patch b/sys-kernel/pinephone-sources/files/0001-bootsplash.patch
new file mode 100644
index 0000000..d5835e5
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0001-bootsplash.patch
@@ -0,0 +1,746 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a74227ad082e..b5633b56391e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2705,6 +2705,14 @@ S: Supported
+ F: drivers/net/bonding/
+ F: include/uapi/linux/if_bonding.h
+
++BOOTSPLASH
++M: Max Staudt
++L: linux-fbdev@vger.kernel.org
++S: Maintained
++F: drivers/video/fbdev/core/bootsplash*.*
++F: drivers/video/fbdev/core/dummycon.c
++F: include/linux/bootsplash.h
++
+ BPF (Safe dynamic programs and tools)
+M: Alexei Starovoitov <ast@kernel.org>
+M: Daniel Borkmann <daniel@iogearbox.net>
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 7f1f1fbcef9e..f3ff976266fe 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -151,6 +151,30 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config BOOTSPLASH
++ bool "Bootup splash screen"
++ depends on FRAMEBUFFER_CONSOLE
++ help
++ This option enables the Linux bootsplash screen.
++
++ The bootsplash is a full-screen logo or animation indicating a
++ booting system. It replaces the classic scrolling text with a
++ graphical alternative, similar to other systems.
++
++ Since this is technically implemented as a hook on top of fbcon,
++ it can only work if the FRAMEBUFFER_CONSOLE is enabled and a
++ framebuffer driver is active. Thus, to get a text-free boot,
++ the system needs to boot with vesafb, efifb, or similar.
++
++ Once built into the kernel, the bootsplash needs to be enabled
++ with bootsplash.enabled=1 and a splash file needs to be supplied.
++
++ Further documentation can be found in:
++ Documentation/fb/bootsplash.txt
++
++ If unsure, say N.
++ This is typically used by distributors and system integrators.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
+index 73493bbd7a15..66895321928e 100644
+--- a/drivers/video/fbdev/core/Makefile
++++ b/drivers/video/fbdev/core/Makefile
+@@ -29,3 +29,6 @@ obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
+ obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+ obj-$(CONFIG_FB_SVGALIB) += svgalib.o
+ obj-$(CONFIG_FB_DDC) += fb_ddc.o
++
++obj-$(CONFIG_BOOTSPLASH) += bootsplash.o bootsplash_render.o \
++ dummyblit.o
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+new file mode 100644
+index 000000000000..e449755af268
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -0,0 +1,294 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Main file: Glue code, workers, timer, PM, kernel and userland API)
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include <linux/atomic.h>
++#include <linux/bootsplash.h>
++#include <linux/console.h>
++#include <linux/device.h> /* dev_warn() */
++#include <linux/fb.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include <linux/printk.h>
++#include <linux/selection.h> /* console_blanked */
++#include <linux/stringify.h>
++#include <linux/sysfs.h>
++#include <linux/types.h>
++#include <linux/vt.h>
++#include <linux/workqueue.h>
++
++#include "bootsplash_internal.h"
++
++
++/*
++ * We only have one splash screen, so let's keep a single
++ * instance of the internal state.
++ */
++static struct splash_priv splash_state;
++
++
++static void splash_callback_redraw_vc(struct work_struct *ignored)
++{
++ if (console_blanked)
++ return;
++
++ console_lock();
++ if (vc_cons[fg_console].d)
++ update_screen(vc_cons[fg_console].d);
++ console_unlock();
++}
++
++
++static bool is_fb_compatible(const struct fb_info *info)
++{
++ if (!(info->flags & FBINFO_BE_MATH)
++ != !fb_be_math((struct fb_info *)info)) {
++ dev_warn(info->device,
++ "Can't draw on foreign endianness framebuffer.\n");
++
++ return false;
++ }
++
++ if (info->flags & FBINFO_MISC_TILEBLITTING) {
++ dev_warn(info->device,
++ "Can't draw splash on tiling framebuffer.\n");
++
++ return false;
++ }
++
++ if (info->fix.type != FB_TYPE_PACKED_PIXELS
++ || (info->fix.visual != FB_VISUAL_TRUECOLOR
++ && info->fix.visual != FB_VISUAL_DIRECTCOLOR)) {
++ dev_warn(info->device,
++ "Can't draw splash on non-packed or non-truecolor framebuffer.\n");
++
++ dev_warn(info->device,
++ " type: %u visual: %u\n",
++ info->fix.type, info->fix.visual);
++
++ return false;
++ }
++
++ if (info->var.bits_per_pixel != 16
++ && info->var.bits_per_pixel != 24
++ && info->var.bits_per_pixel != 32) {
++ dev_warn(info->device,
++ "We only support drawing on framebuffers with 16, 24, or 32 bpp, not %d.\n",
++ info->var.bits_per_pixel);
++
++ return false;
++ }
++
++ return true;
++}
++
++
++/*
++ * Called by fbcon_switch() when an instance is activated or refreshed.
++ */
++void bootsplash_render_full(struct fb_info *info)
++{
++ if (!is_fb_compatible(info))
++ return;
++
++ bootsplash_do_render_background(info);
++}
++
++
++/*
++ * External status enquiry and on/off switch
++ */
++bool bootsplash_would_render_now(void)
++{
++ return !oops_in_progress
++ && !console_blanked
++ && bootsplash_is_enabled();
++}
++
++bool bootsplash_is_enabled(void)
++{
++ bool was_enabled;
++
++ /* Make sure we have the newest state */
++ smp_rmb();
++
++ was_enabled = test_bit(0, &splash_state.enabled);
++
++ return was_enabled;
++}
++
++void bootsplash_disable(void)
++{
++ int was_enabled;
++
++ was_enabled = test_and_clear_bit(0, &splash_state.enabled);
++
++ if (was_enabled) {
++ if (oops_in_progress) {
++ /* Redraw screen now so we can see a panic */
++ if (vc_cons[fg_console].d)
++ update_screen(vc_cons[fg_console].d);
++ } else {
++ /* No urgency, redraw at next opportunity */
++ schedule_work(&splash_state.work_redraw_vc);
++ }
++ }
++}
++
++void bootsplash_enable(void)
++{
++ bool was_enabled;
++
++ if (oops_in_progress)
++ return;
++
++ was_enabled = test_and_set_bit(0, &splash_state.enabled);
++
++ if (!was_enabled)
++ schedule_work(&splash_state.work_redraw_vc);
++}
++
++
++/*
++ * Userland API via platform device in sysfs
++ */
++static ssize_t splash_show_enabled(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", bootsplash_is_enabled());
++}
++
++static ssize_t splash_store_enabled(struct device *device,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ bool enable;
++ int err;
++
++ if (!buf || !count)
++ return -EFAULT;
++
++ err = kstrtobool(buf, &enable);
++ if (err)
++ return err;
++
++ if (enable)
++ bootsplash_enable();
++ else
++ bootsplash_disable();
++
++ return count;
++}
++
++static DEVICE_ATTR(enabled, 0644, splash_show_enabled, splash_store_enabled);
++
++
++static struct attribute *splash_dev_attrs[] = {
++ &dev_attr_enabled.attr,
++ NULL
++};
++
++ATTRIBUTE_GROUPS(splash_dev);
++
++
++
++
++/*
++ * Power management fixup via platform device
++ *
++ * When the system is woken from sleep or restored after hibernating, we
++ * cannot expect the screen contents to still be present in video RAM.
++ * Thus, we have to redraw the splash if we're currently active.
++ */
++static int splash_resume(struct device *device)
++{
++ if (bootsplash_would_render_now())
++ schedule_work(&splash_state.work_redraw_vc);
++
++ return 0;
++}
++
++static int splash_suspend(struct device *device)
++{
++ cancel_work_sync(&splash_state.work_redraw_vc);
++
++ return 0;
++}
++
++
++static const struct dev_pm_ops splash_pm_ops = {
++ .thaw = splash_resume,
++ .restore = splash_resume,
++ .resume = splash_resume,
++ .suspend = splash_suspend,
++ .freeze = splash_suspend,
++};
++
++static struct platform_driver splash_driver = {
++ .driver = {
++ .name = "bootsplash",
++ .pm = &splash_pm_ops,
++ },
++};
++
++
++/*
++ * Main init
++ */
++void bootsplash_init(void)
++{
++ int ret;
++
++ /* Initialized already? */
++ if (splash_state.splash_device)
++ return;
++
++
++ /* Register platform device to export user API */
++ ret = platform_driver_register(&splash_driver);
++ if (ret) {
++ pr_err("platform_driver_register() failed: %d\n", ret);
++ goto err;
++ }
++
++ splash_state.splash_device
++ = platform_device_alloc("bootsplash", 0);
++
++ if (!splash_state.splash_device)
++ goto err_driver;
++
++ splash_state.splash_device->dev.groups = splash_dev_groups;
++
++ ret = platform_device_add(splash_state.splash_device);
++ if (ret) {
++ pr_err("platform_device_add() failed: %d\n", ret);
++ goto err_device;
++ }
++
++
++ INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc);
++
++ return;
++
++err_device:
++ platform_device_put(splash_state.splash_device);
++ splash_state.splash_device = NULL;
++err_driver:
++ platform_driver_unregister(&splash_driver);
++err:
++ pr_err("Failed to initialize.\n");
++}
+diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h
+new file mode 100644
+index 000000000000..b11da5cb90bf
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash_internal.h
+@@ -0,0 +1,55 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Internal data structures used at runtime)
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#ifndef __BOOTSPLASH_INTERNAL_H
++#define __BOOTSPLASH_INTERNAL_H
++
++
++#include <linux/kernel.h>
++#include <linux/fb.h>
++#include <linux/mutex.h>
++#include <linux/platform_device.h>
++#include <linux/workqueue.h>
++
++
++/*
++ * Runtime types
++ */
++struct splash_priv {
++ /*
++ * Enabled/disabled state, to be used with atomic bit operations.
++ * Bit 0: 0 = Splash hidden
++ * 1 = Splash shown
++ *
++ * Note: fbcon.c uses this twice, by calling
++ * bootsplash_would_render_now() in set_blitting_type() and
++ * in fbcon_switch().
++ * This is racy, but eventually consistent: Turning the
++ * splash on/off will cause a redraw, which calls
++ * fbcon_switch(), which calls set_blitting_type().
++ * So the last on/off toggle will make things consistent.
++ */
++ unsigned long enabled;
++
++ /* Our gateway to userland via sysfs */
++ struct platform_device *splash_device;
++
++ struct work_struct work_redraw_vc;
++};
++
++
++
++/*
++ * Rendering functions
++ */
++void bootsplash_do_render_background(struct fb_info *info);
++
++#endif
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+new file mode 100644
+index 000000000000..4d7e0117f653
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -0,0 +1,93 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Rendering functions)
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include <linux/bootsplash.h>
++#include <linux/fb.h>
++#include <linux/kernel.h>
++#include <linux/printk.h>
++#include <linux/types.h>
++
++#include "bootsplash_internal.h"
++
++
++
++
++/*
++ * Rendering: Internal drawing routines
++ */
++
++
++/*
++ * Pack pixel into target format and do Big/Little Endian handling.
++ * This would be a good place to handle endianness conversion if necessary.
++ */
++static inline u32 pack_pixel(const struct fb_var_screeninfo *dst_var,
++ u8 red, u8 green, u8 blue)
++{
++ u32 dstpix;
++
++ /* Quantize pixel */
++ red = red >> (8 - dst_var->red.length);
++ green = green >> (8 - dst_var->green.length);
++ blue = blue >> (8 - dst_var->blue.length);
++
++ /* Pack pixel */
++ dstpix = red << (dst_var->red.offset)
++ | green << (dst_var->green.offset)
++ | blue << (dst_var->blue.offset);
++
++ /*
++ * Move packed pixel to the beginning of the memory cell,
++ * so we can memcpy() it out easily
++ */
++#ifdef __BIG_ENDIAN
++ switch (dst_var->bits_per_pixel) {
++ case 16:
++ dstpix <<= 16;
++ break;
++ case 24:
++ dstpix <<= 8;
++ break;
++ case 32:
++ break;
++ }
++#else
++ /* This is intrinsically unnecessary on Little Endian */
++#endif
++
++ return dstpix;
++}
++
++
++void bootsplash_do_render_background(struct fb_info *info)
++{
++ unsigned int x, y;
++ u32 dstpix;
++ u32 dst_octpp = info->var.bits_per_pixel / 8;
++
++ dstpix = pack_pixel(&info->var,
++ 0,
++ 0,
++ 0);
++
++ for (y = 0; y < info->var.yres_virtual; y++) {
++ u8 *dstline = info->screen_buffer + (y * info->fix.line_length);
++
++ for (x = 0; x < info->var.xres_virtual; x++) {
++ memcpy(dstline, &dstpix, dst_octpp);
++
++ dstline += dst_octpp;
++ }
++ }
++}
+diff --git a/drivers/video/fbdev/core/dummyblit.c b/drivers/video/fbdev/core/dummyblit.c
+new file mode 100644
+index 000000000000..8c22ff92ce24
+--- /dev/null
++++ b/drivers/video/fbdev/core/dummyblit.c
+@@ -0,0 +1,89 @@
++/*
++ * linux/drivers/video/fbdev/core/dummyblit.c -- Dummy Blitting Operation
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * These functions are used in place of blitblit/tileblit to suppress
++ * fbcon's text output while a splash is shown.
++ *
++ * Only suppressing actual rendering keeps the text buffer in the VC layer
++ * intact and makes it easy to switch back from the bootsplash to a full
++ * text console with a simple redraw (with the original functions in place).
++ *
++ * Based on linux/drivers/video/fbdev/core/bitblit.c
++ * and linux/drivers/video/fbdev/core/tileblit.c
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/console.h>
++#include "fbcon.h"
++
++static void dummy_bmove(struct vc_data *vc, struct fb_info *info, int sy,
++ int sx, int dy, int dx, int height, int width)
++{
++ ;
++}
++
++static void dummy_clear(struct vc_data *vc, struct fb_info *info, int sy,
++ int sx, int height, int width)
++{
++ ;
++}
++
++static void dummy_putcs(struct vc_data *vc, struct fb_info *info,
++ const unsigned short *s, int count, int yy, int xx,
++ int fg, int bg)
++{
++ ;
++}
++
++static void dummy_clear_margins(struct vc_data *vc, struct fb_info *info,
++ int color, int bottom_only)
++{
++ ;
++}
++
++static void dummy_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
++{
++ ;
++}
++
++static int dummy_update_start(struct fb_info *info)
++{
++ /*
++ * Copied from bitblit.c and tileblit.c
++ *
++ * As of Linux 4.12, nobody seems to care about our return value.
++ */
++ struct fbcon_ops *ops = info->fbcon_par;
++ int err;
++
++ err = fb_pan_display(info, &ops->var);
++ ops->var.xoffset = info->var.xoffset;
++ ops->var.yoffset = info->var.yoffset;
++ ops->var.vmode = info->var.vmode;
++ return err;
++}
++
++void fbcon_set_dummyops(struct fbcon_ops *ops)
++{
++ ops->bmove = dummy_bmove;
++ ops->clear = dummy_clear;
++ ops->putcs = dummy_putcs;
++ ops->clear_margins = dummy_clear_margins;
++ ops->cursor = dummy_cursor;
++ ops->update_start = dummy_update_start;
++ ops->rotate_font = NULL;
++}
++EXPORT_SYMBOL_GPL(fbcon_set_dummyops);
++
++MODULE_AUTHOR("Max Staudt <mstaudt@suse.de>");
++MODULE_DESCRIPTION("Dummy Blitting Operation");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 04612f938bab..9a39a6fcfe98 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -80,6 +80,7 @@
+ #include <asm/irq.h>
+
+ #include "fbcon.h"
++#include <linux/bootsplash.h>
+
+ #ifdef FBCONDEBUG
+ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -542,6 +543,8 @@ static int do_fbcon_takeover(int show_logo)
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = info_idx;
+
++ bootsplash_init();
++
+ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+@@ -661,6 +664,9 @@ static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
+ else {
+ fbcon_set_rotation(info);
+ fbcon_set_bitops(ops);
++
++ if (bootsplash_would_render_now())
++ fbcon_set_dummyops(ops);
+ }
+ }
+
+@@ -683,6 +689,19 @@ static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
+ ops->p = &fb_display[vc->vc_num];
+ fbcon_set_rotation(info);
+ fbcon_set_bitops(ops);
++
++ /*
++ * Note:
++ * This is *eventually correct*.
++ * Setting the fbcon operations and drawing the splash happen at
++ * different points in time. If the splash is enabled/disabled
++ * in between, then bootsplash_{en,dis}able will schedule a
++ * redraw, which will again render the splash (or not) and set
++ * the correct fbcon ops.
++ * The last run will then be the right one.
++ */
++ if (bootsplash_would_render_now())
++ fbcon_set_dummyops(ops);
+ }
+
+ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
+@@ -2184,6 +2203,9 @@ static int fbcon_switch(struct vc_data *vc)
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
+
++ if (bootsplash_would_render_now())
++ bootsplash_render_full(info);
++
+ if (softback_top) {
+ if (softback_lines)
+ fbcon_set_origin(vc);
+diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
+index 18f3ac144237..45f94347fe5e 100644
+--- a/drivers/video/fbdev/core/fbcon.h
++++ b/drivers/video/fbdev/core/fbcon.h
+@@ -214,6 +214,11 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
+ #define SCROLL_REDRAW 0x004
+ #define SCROLL_PAN_REDRAW 0x005
+
++#ifdef CONFIG_BOOTSPLASH
++extern void fbcon_set_dummyops(struct fbcon_ops *ops);
++#else /* CONFIG_BOOTSPLASH */
++#define fbcon_set_dummyops(x)
++#endif /* CONFIG_BOOTSPLASH */
+ #ifdef CONFIG_FB_TILEBLITTING
+ extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
+ #endif
+diff --git a/include/linux/bootsplash.h b/include/linux/bootsplash.h
+new file mode 100644
+index 000000000000..c6dd0b43180d
+--- /dev/null
++++ b/include/linux/bootsplash.h
+@@ -0,0 +1,43 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#ifndef __LINUX_BOOTSPLASH_H
++#define __LINUX_BOOTSPLASH_H
++
++#include <linux/fb.h>
++
++
++#ifdef CONFIG_BOOTSPLASH
++
++extern void bootsplash_render_full(struct fb_info *info);
++
++extern bool bootsplash_would_render_now(void);
++
++extern bool bootsplash_is_enabled(void);
++extern void bootsplash_disable(void);
++extern void bootsplash_enable(void);
++
++extern void bootsplash_init(void);
++
++#else /* CONFIG_BOOTSPLASH */
++
++#define bootsplash_render_full(x)
++
++#define bootsplash_would_render_now() (false)
++
++#define bootsplash_is_enabled() (false)
++#define bootsplash_disable()
++#define bootsplash_enable()
++
++#define bootsplash_init()
++
++#endif /* CONFIG_BOOTSPLASH */
++
++
++#endif
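
The quantize-and-shift scheme in pack_pixel() above is easy to sanity-check in
isolation. The following standalone userspace sketch (not part of the patch
set) hardcodes the RGB565 field widths and offsets instead of reading them
from struct fb_var_screeninfo, but otherwise mirrors the same logic:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the patch's pack_pixel() for a fixed RGB565 layout:
 * quantize each 8-bit channel down to the field width, then shift
 * the channels into their offsets. */
static uint32_t pack_rgb565(uint8_t red, uint8_t green, uint8_t blue)
{
	uint32_t r = red   >> (8 - 5);	/* red:   5 bits at offset 11 */
	uint32_t g = green >> (8 - 6);	/* green: 6 bits at offset 5  */
	uint32_t b = blue  >> (8 - 5);	/* blue:  5 bits at offset 0  */

	return (r << 11) | (g << 5) | b;
}

int main(void)
{
	printf("red   -> 0x%04x\n", pack_rgb565(0xff, 0x00, 0x00)); /* 0xf800 */
	printf("white -> 0x%04x\n", pack_rgb565(0xff, 0xff, 0xff)); /* 0xffff */
	return 0;
}

The 24bpp and 32bpp cases fall out of the same math with wider fields; only
the big-endian cell alignment handled at the end of pack_pixel() is skipped
here.
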
diff --git a/sys-kernel/pinephone-sources/files/0001-drivers-usb-add-reset_resume-callback.patch b/sys-kernel/pinephone-sources/files/0001-drivers-usb-add-reset_resume-callback.patch
new file mode 100644
index 0000000..c232f58
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0001-drivers-usb-add-reset_resume-callback.patch
@@ -0,0 +1,24 @@
+From 22008251d617054271a65f29178e2df74dd3e33c Mon Sep 17 00:00:00 2001
+From: Bhushan Shah <bshah@kde.org>
+Date: Fri, 9 Apr 2021 16:22:49 +0530
+Subject: [PATCH 1/5] drivers/usb: add reset_resume callback
+
+---
+ drivers/usb/serial/option.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c6969ca728390..21aeb7dc8f6ee 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2105,6 +2105,7 @@ static struct usb_serial_driver option_1port_device = {
+ #ifdef CONFIG_PM
+ .suspend = usb_wwan_suspend,
+ .resume = usb_wwan_resume,
++ .reset_resume = usb_wwan_resume,
+ #endif
+ };
+
+--
+2.31.1
+
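
For context: option_1port_device already routes suspend/resume through the
usb_wwan helpers, and this patch simply reuses the resume handler for the
reset_resume slot, so a modem that re-enumerates after a USB reset takes the
same recovery path as one waking up normally. A toy standalone sketch of that
ops-table pattern (all names here are invented for illustration):

#include <stdio.h>

struct dev_ops {
	int (*resume)(void);
	int (*reset_resume)(void);
};

static int wwan_resume(void)
{
	puts("restoring port state");
	return 0;
}

/* One handler does double duty, as in the patch above. */
static const struct dev_ops option_ops = {
	.resume       = wwan_resume,
	.reset_resume = wwan_resume,
};

int main(void)
{
	return option_ops.reset_resume();
}
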
diff --git a/sys-kernel/pinephone-sources/files/0001-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch b/sys-kernel/pinephone-sources/files/0001-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch
new file mode 100644
index 0000000..e7d4da5
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0001-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch
@@ -0,0 +1,150 @@
+--- b/drivers/video/fbdev/core/bitblit.c
++++ a/drivers/video/fbdev/core/bitblit.c
+@@ -234,7 +234,7 @@
+ }
+
+ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -247,6 +247,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -394,7 +394,7 @@
+ c = scr_readw((u16 *) vc->vc_pos);
+ mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
+ CM_ERASE : CM_DRAW;
++ ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ console_unlock();
+ }
+@@ -1345,7 +1345,7 @@
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+
++ ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ }
+
+--- b/drivers/video/fbdev/core/fbcon.h
++++ a/drivers/video/fbdev/core/fbcon.h
+@@ -62,7 +62,7 @@
+ void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
+ int color, int bottom_only);
+ void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg);
+- int fg, int bg);
+ int (*update_start)(struct fb_info *info);
+ int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
+ struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
+--- b/drivers/video/fbdev/core/fbcon_ccw.c
++++ a/drivers/video/fbdev/core/fbcon_ccw.c
+@@ -219,7 +219,7 @@
+ }
+
+ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -236,6 +236,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+--- b/drivers/video/fbdev/core/fbcon_cw.c
++++ a/drivers/video/fbdev/core/fbcon_cw.c
+@@ -202,7 +202,7 @@
+ }
+
+ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -219,6 +219,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+--- b/drivers/video/fbdev/core/fbcon_ud.c
++++ a/drivers/video/fbdev/core/fbcon_ud.c
+@@ -249,7 +249,7 @@
+ }
+
+ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -267,6 +267,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
+--- b/drivers/video/fbdev/core/tileblit.c
++++ a/drivers/video/fbdev/core/tileblit.c
+@@ -80,7 +80,7 @@
+ }
+
+ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_tilecursor cursor;
+ int use_sw = (vc->vc_cursor_type & 0x10);
diff --git a/sys-kernel/pinephone-sources/files/0002-Bluetooth-Fix-LL-PRivacy-BLE-device-fails-to-connect.patch b/sys-kernel/pinephone-sources/files/0002-Bluetooth-Fix-LL-PRivacy-BLE-device-fails-to-connect.patch
new file mode 100644
index 0000000..8707046
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0002-Bluetooth-Fix-LL-PRivacy-BLE-device-fails-to-connect.patch
@@ -0,0 +1,67 @@
+From 27061f0b322a585c30db111719f89c23c15a88b4 Mon Sep 17 00:00:00 2001
+From: Sathish Narasimman <sathish.narasimman@intel.com>
+Date: Thu, 29 Oct 2020 13:18:21 +0530
+Subject: Bluetooth: Fix: LL PRivacy BLE device fails to connect
+
+When adding device to white list the device is added to resolving list
+also. It has to be added only when HCI_ENABLE_LL_PRIVACY flag is set.
+HCI_ENABLE_LL_PRIVACY flag has to be tested before adding/deleting devices
+to resolving list. use_ll_privacy macro is used only to check if controller
+supports LL_Privacy.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=209745
+
+Fixes: 0eee35bdfa3b ("Bluetooth: Update resolving list when updating whitelist")
+Signed-off-by: Sathish Narasimman <sathish.narasimman@intel.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+---
+ net/bluetooth/hci_request.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index e0269192f2e5..a565c91b8599 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -698,7 +698,8 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
+ cp.bdaddr_type);
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
+
+- if (use_ll_privacy(req->hdev)) {
++ if (use_ll_privacy(req->hdev) &&
++ hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
+ struct smp_irk *irk;
+
+ irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
+@@ -732,7 +733,8 @@ static int add_to_white_list(struct hci_request *req,
+ return -1;
+
+ /* White list can not be used with RPAs */
+- if (!allow_rpa && !use_ll_privacy(hdev) &&
++ if (!allow_rpa &&
++ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
+ return -1;
+ }
+@@ -750,7 +752,8 @@ static int add_to_white_list(struct hci_request *req,
+ cp.bdaddr_type);
+ hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+
+- if (use_ll_privacy(hdev)) {
++ if (use_ll_privacy(hdev) &&
++ hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
+ struct smp_irk *irk;
+
++ irk = hci_find_irk_by_addr(hdev, &params->addr,
+@@ -812,7 +815,8 @@ static u8 update_white_list(struct hci_request *req)
+ }
+
+ /* White list can not be used with RPAs */
+- if (!allow_rpa && !use_ll_privacy(hdev) &&
++ if (!allow_rpa &&
++ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
+ hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
+ return 0x00;
+ }
+--
+cgit v1.2.3-1-gf6bb5
+
diff --git a/sys-kernel/pinephone-sources/files/0002-Revert-usb-quirks-Add-USB_QUIRK_RESET-for-Quectel-EG.patch b/sys-kernel/pinephone-sources/files/0002-Revert-usb-quirks-Add-USB_QUIRK_RESET-for-Quectel-EG.patch
new file mode 100644
index 0000000..f7f97f0
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0002-Revert-usb-quirks-Add-USB_QUIRK_RESET-for-Quectel-EG.patch
@@ -0,0 +1,28 @@
+From 9d662fb865ae496a7eb51d2bdddefd2427d9a30e Mon Sep 17 00:00:00 2001
+From: Bhushan Shah <bshah@kde.org>
+Date: Fri, 9 Apr 2021 16:25:25 +0530
+Subject: [PATCH 2/5] Revert "usb: quirks: Add USB_QUIRK_RESET for Quectel
+ EG25G Modem"
+
+Reverts 8cc2a406ecc711f5
+---
+ drivers/usb/core/quirks.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index cb556617aa34f..6ade3daf78584 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -501,9 +501,6 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* INTEL VALUE SSD */
+ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Quectel EG25G Modem */
+- { USB_DEVICE(0x2c7c, 0x0125), .driver_info = USB_QUIRK_RESET },
+-
+ { } /* terminating entry must be last */
+ };
+
+--
+2.31.1
+
diff --git a/sys-kernel/pinephone-sources/files/0002-bootsplash.patch b/sys-kernel/pinephone-sources/files/0002-bootsplash.patch
new file mode 100644
index 0000000..92d62ca
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0002-bootsplash.patch
@@ -0,0 +1,669 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b5633b56391e..5c237445761e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2712,6 +2712,7 @@ S: Maintained
+ F: drivers/video/fbdev/core/bootsplash*.*
+ F: drivers/video/fbdev/core/dummycon.c
+ F: include/linux/bootsplash.h
++F: include/uapi/linux/bootsplash_file.h
+
+ BPF (Safe dynamic programs and tools)
+ M: Alexei Starovoitov <ast@kernel.org>
+diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
+index 66895321928e..6a8d1bab8a01 100644
+--- a/drivers/video/fbdev/core/Makefile
++++ b/drivers/video/fbdev/core/Makefile
+@@ -31,4 +31,4 @@ obj-$(CONFIG_FB_SVGALIB) += svgalib.o
+ obj-$(CONFIG_FB_DDC) += fb_ddc.o
+
+ obj-$(CONFIG_BOOTSPLASH) += bootsplash.o bootsplash_render.o \
+- dummyblit.o
++ bootsplash_load.o dummyblit.o
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+index e449755af268..843c5400fefc 100644
+--- a/drivers/video/fbdev/core/bootsplash.c
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -32,6 +32,7 @@
+ #include <linux/workqueue.h>
+
+ #include "bootsplash_internal.h"
++#include "uapi/linux/bootsplash_file.h"
+
+
+ /*
+@@ -102,10 +103,17 @@ static bool is_fb_compatible(const struct fb_info *info)
+ */
+ void bootsplash_render_full(struct fb_info *info)
+ {
++ mutex_lock(&splash_state.data_lock);
++
+ if (!is_fb_compatible(info))
+- return;
++ goto out;
++
++ bootsplash_do_render_background(info, splash_state.file);
++
++ bootsplash_do_render_pictures(info, splash_state.file);
+
+- bootsplash_do_render_background(info);
++out:
++ mutex_unlock(&splash_state.data_lock);
+ }
+
+
+@@ -116,6 +124,7 @@ bool bootsplash_would_render_now(void)
+ {
+ return !oops_in_progress
+ && !console_blanked
++ && splash_state.file
+ && bootsplash_is_enabled();
+ }
+
+@@ -252,6 +261,7 @@ static struct platform_driver splash_driver = {
+ void bootsplash_init(void)
+ {
+ int ret;
++ struct splash_file_priv *fp;
+
+ /* Initialized already? */
+ if (splash_state.splash_device)
+@@ -280,8 +290,26 @@ void bootsplash_init(void)
+ }
+
+
++ mutex_init(&splash_state.data_lock);
++ set_bit(0, &splash_state.enabled);
++
+ INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc);
+
++
++ if (!splash_state.bootfile || !strlen(splash_state.bootfile))
++ return;
++
++ fp = bootsplash_load_firmware(&splash_state.splash_device->dev,
++ splash_state.bootfile);
++
++ if (!fp)
++ goto err;
++
++ mutex_lock(&splash_state.data_lock);
++ splash_state.splash_fb = NULL;
++ splash_state.file = fp;
++ mutex_unlock(&splash_state.data_lock);
++
+ return;
+
+ err_device:
+@@ -292,3 +320,7 @@ void bootsplash_init(void)
+ err:
+ pr_err("Failed to initialize.\n");
+ }
++
++
++module_param_named(bootfile, splash_state.bootfile, charp, 0444);
++MODULE_PARM_DESC(bootfile, "Bootsplash file to load on boot");
+diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h
+index b11da5cb90bf..71e2a27ac0b8 100644
+--- a/drivers/video/fbdev/core/bootsplash_internal.h
++++ b/drivers/video/fbdev/core/bootsplash_internal.h
+@@ -15,15 +15,43 @@
+
+ #include <linux/kernel.h>
+ #include <linux/fb.h>
++#include <linux/firmware.h>
+ #include <linux/mutex.h>
+ #include <linux/platform_device.h>
+ #include <linux/workqueue.h>
+
++#include "uapi/linux/bootsplash_file.h"
++
+
+ /*
+ * Runtime types
+ */
++struct splash_blob_priv {
++ struct splash_blob_header *blob_header;
++ const void *data;
++};
++
++
++struct splash_pic_priv {
++ const struct splash_pic_header *pic_header;
++
++ struct splash_blob_priv *blobs;
++ u16 blobs_loaded;
++};
++
++
++struct splash_file_priv {
++ const struct firmware *fw;
++ const struct splash_file_header *header;
++
++ struct splash_pic_priv *pics;
++};
++
++
+ struct splash_priv {
++ /* Bootup and runtime state */
++ char *bootfile;
++
+ /*
+ * Enabled/disabled state, to be used with atomic bit operations.
+ * Bit 0: 0 = Splash hidden
+@@ -43,6 +71,13 @@ struct splash_priv {
+ struct platform_device *splash_device;
+
+ struct work_struct work_redraw_vc;
++
++ /* Splash data structures including lock for everything below */
++ struct mutex data_lock;
++
++ struct fb_info *splash_fb;
++
++ struct splash_file_priv *file;
+ };
+
+
+@@ -50,6 +85,14 @@ struct splash_priv {
+ /*
+ * Rendering functions
+ */
+-void bootsplash_do_render_background(struct fb_info *info);
++void bootsplash_do_render_background(struct fb_info *info,
++ const struct splash_file_priv *fp);
++void bootsplash_do_render_pictures(struct fb_info *info,
++ const struct splash_file_priv *fp);
++
++
++void bootsplash_free_file(struct splash_file_priv *fp);
++struct splash_file_priv *bootsplash_load_firmware(struct device *device,
++ const char *path);
+
+ #endif
+diff --git a/drivers/video/fbdev/core/bootsplash_load.c b/drivers/video/fbdev/core/bootsplash_load.c
+new file mode 100644
+index 000000000000..fd807571ab7d
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash_load.c
+@@ -0,0 +1,225 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Loading and freeing functions)
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include <linux/bootsplash.h>
++#include <linux/fb.h>
++#include <linux/firmware.h>
++#include <linux/kernel.h>
++#include <linux/printk.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++
++#include "bootsplash_internal.h"
++#include "uapi/linux/bootsplash_file.h"
++
++
++
++
++/*
++ * Free all vmalloc()'d resources describing a splash file.
++ */
++void bootsplash_free_file(struct splash_file_priv *fp)
++{
++ if (!fp)
++ return;
++
++ if (fp->pics) {
++ unsigned int i;
++
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++
++ if (pp->blobs)
++ vfree(pp->blobs);
++ }
++
++ vfree(fp->pics);
++ }
++
++ release_firmware(fp->fw);
++ vfree(fp);
++}
++
++
++
++
++/*
++ * Load a splash screen from a "firmware" file.
++ *
++ * Parsing, and sanity checks.
++ */
++#ifdef __BIG_ENDIAN
++ #define BOOTSPLASH_MAGIC BOOTSPLASH_MAGIC_BE
++#else
++ #define BOOTSPLASH_MAGIC BOOTSPLASH_MAGIC_LE
++#endif
++
++struct splash_file_priv *bootsplash_load_firmware(struct device *device,
++ const char *path)
++{
++ const struct firmware *fw;
++ struct splash_file_priv *fp;
++ unsigned int i;
++ const u8 *walker;
++
++ if (request_firmware(&fw, path, device))
++ return NULL;
++
++ if (fw->size < sizeof(struct splash_file_header)
++ || memcmp(fw->data, BOOTSPLASH_MAGIC, sizeof(fp->header->id))) {
++ pr_err("Not a bootsplash file.\n");
++
++ release_firmware(fw);
++ return NULL;
++ }
++
++ fp = vzalloc(sizeof(struct splash_file_priv));
++ if (!fp) {
++ release_firmware(fw);
++ return NULL;
++ }
++
++ pr_info("Loading splash file (%li bytes)\n", fw->size);
++
++ fp->fw = fw;
++ fp->header = (struct splash_file_header *)fw->data;
++
++ /* Sanity checks */
++ if (fp->header->version != BOOTSPLASH_VERSION) {
++ pr_err("Loaded v%d file, but we only support version %d\n",
++ fp->header->version,
++ BOOTSPLASH_VERSION);
++
++ goto err;
++ }
++
++ if (fw->size < sizeof(struct splash_file_header)
++ + fp->header->num_pics
++ * sizeof(struct splash_pic_header)
++ + fp->header->num_blobs
++ * sizeof(struct splash_blob_header)) {
++ pr_err("File incomplete.\n");
++
++ goto err;
++ }
++
++ /* Read picture headers */
++ if (fp->header->num_pics) {
++ fp->pics = vzalloc(fp->header->num_pics
++ * sizeof(struct splash_pic_priv));
++ if (!fp->pics)
++ goto err;
++ }
++
++ walker = fw->data + sizeof(struct splash_file_header);
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++ struct splash_pic_header *ph = (void *)walker;
++
++ pr_debug("Picture %u: Size %ux%u\n", i, ph->width, ph->height);
++
++ if (ph->num_blobs < 1) {
++ pr_err("Picture %u: Zero blobs? Aborting load.\n", i);
++ goto err;
++ }
++
++ pp->pic_header = ph;
++ pp->blobs = vzalloc(ph->num_blobs
++ * sizeof(struct splash_blob_priv));
++ if (!pp->blobs)
++ goto err;
++
++ walker += sizeof(struct splash_pic_header);
++ }
++
++ /* Read blob headers */
++ for (i = 0; i < fp->header->num_blobs; i++) {
++ struct splash_blob_header *bh = (void *)walker;
++ struct splash_pic_priv *pp;
++
++ if (walker + sizeof(struct splash_blob_header)
++ > fw->data + fw->size)
++ goto err;
++
++ walker += sizeof(struct splash_blob_header);
++
++ if (walker + bh->length > fw->data + fw->size)
++ goto err;
++
++ if (bh->picture_id >= fp->header->num_pics)
++ goto nextblob;
++
++ pp = &fp->pics[bh->picture_id];
++
++ pr_debug("Blob %u, pic %u, blobs_loaded %u, num_blobs %u.\n",
++ i, bh->picture_id,
++ pp->blobs_loaded, pp->pic_header->num_blobs);
++
++ if (pp->blobs_loaded >= pp->pic_header->num_blobs)
++ goto nextblob;
++
++ switch (bh->type) {
++ case 0:
++ /* Raw 24-bit packed pixels */
++ if (bh->length != pp->pic_header->width
++ * pp->pic_header->height * 3) {
++ pr_err("Blob %u, type 1: Length doesn't match picture.\n",
++ i);
++
++ goto err;
++ }
++ break;
++ default:
++ pr_warn("Blob %u, unknown type %u.\n", i, bh->type);
++ goto nextblob;
++ }
++
++ pp->blobs[pp->blobs_loaded].blob_header = bh;
++ pp->blobs[pp->blobs_loaded].data = walker;
++ pp->blobs_loaded++;
++
++nextblob:
++ walker += bh->length;
++ if (bh->length % 16)
++ walker += 16 - (bh->length % 16);
++ }
++
++ if (walker != fw->data + fw->size)
++ pr_warn("Trailing data in splash file.\n");
++
++ /* Walk over pictures and ensure all blob slots are filled */
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++
++ if (pp->blobs_loaded != pp->pic_header->num_blobs) {
++ pr_err("Picture %u doesn't have all blob slots filled.\n",
++ i);
++
++ goto err;
++ }
++ }
++
++ pr_info("Loaded (%ld bytes, %u pics, %u blobs).\n",
++ fw->size,
++ fp->header->num_pics,
++ fp->header->num_blobs);
++
++ return fp;
++
++
++err:
++ bootsplash_free_file(fp);
++ return NULL;
++}
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+index 4d7e0117f653..2ae36949d0e3 100644
+--- a/drivers/video/fbdev/core/bootsplash_render.c
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -19,6 +19,7 @@
+ #include <linux/types.h>
+
+ #include "bootsplash_internal.h"
++#include "uapi/linux/bootsplash_file.h"
+
+
+
+@@ -70,16 +71,69 @@ static inline u32 pack_pixel(const struct fb_var_screeninfo *dst_var,
+ }
+
+
+-void bootsplash_do_render_background(struct fb_info *info)
++/*
++ * Copy from source and blend into the destination picture.
++ * Currently assumes that the source picture is 24bpp.
++ * Currently assumes that the destination is <= 32bpp.
++ */
++static int splash_convert_to_fb(u8 *dst,
++ const struct fb_var_screeninfo *dst_var,
++ unsigned int dst_stride,
++ unsigned int dst_xoff,
++ unsigned int dst_yoff,
++ const u8 *src,
++ unsigned int src_width,
++ unsigned int src_height)
++{
++ unsigned int x, y;
++ unsigned int src_stride = 3 * src_width; /* Assume 24bpp packed */
++ u32 dst_octpp = dst_var->bits_per_pixel / 8;
++
++ dst_xoff += dst_var->xoffset;
++ dst_yoff += dst_var->yoffset;
++
++ /* Copy with stride and pixel size adjustment */
++ for (y = 0;
++ y < src_height && y + dst_yoff < dst_var->yres_virtual;
++ y++) {
++ const u8 *srcline = src + (y * src_stride);
++ u8 *dstline = dst + ((y + dst_yoff) * dst_stride)
++ + (dst_xoff * dst_octpp);
++
++ for (x = 0;
++ x < src_width && x + dst_xoff < dst_var->xres_virtual;
++ x++) {
++ u8 red, green, blue;
++ u32 dstpix;
++
++ /* Read pixel */
++ red = *srcline++;
++ green = *srcline++;
++ blue = *srcline++;
++
++ /* Write pixel */
++ dstpix = pack_pixel(dst_var, red, green, blue);
++ memcpy(dstline, &dstpix, dst_octpp);
++
++ dstline += dst_octpp;
++ }
++ }
++
++ return 0;
++}
++
++
++void bootsplash_do_render_background(struct fb_info *info,
++ const struct splash_file_priv *fp)
+ {
+ unsigned int x, y;
+ u32 dstpix;
+ u32 dst_octpp = info->var.bits_per_pixel / 8;
+
+ dstpix = pack_pixel(&info->var,
+- 0,
+- 0,
+- 0);
++ fp->header->bg_red,
++ fp->header->bg_green,
++ fp->header->bg_blue);
+
+ for (y = 0; y < info->var.yres_virtual; y++) {
+ u8 *dstline = info->screen_buffer + (y * info->fix.line_length);
+@@ -91,3 +145,44 @@ void bootsplash_do_render_background(struct fb_info *info)
+ }
+ }
+ }
++
++
++void bootsplash_do_render_pictures(struct fb_info *info,
++ const struct splash_file_priv *fp)
++{
++ unsigned int i;
++
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_blob_priv *bp;
++ struct splash_pic_priv *pp = &fp->pics[i];
++ long dst_xoff, dst_yoff;
++
++ if (pp->blobs_loaded < 1)
++ continue;
++
++ bp = &pp->blobs[0];
++
++ if (!bp || bp->blob_header->type != 0)
++ continue;
++
++ dst_xoff = (info->var.xres - pp->pic_header->width) / 2;
++ dst_yoff = (info->var.yres - pp->pic_header->height) / 2;
++
++ if (dst_xoff < 0
++ || dst_yoff < 0
++ || dst_xoff + pp->pic_header->width > info->var.xres
++ || dst_yoff + pp->pic_header->height > info->var.yres) {
++ pr_info_once("Picture %u is out of bounds at current resolution: %dx%d\n"
++ "(this will only be printed once every reboot)\n",
++ i, info->var.xres, info->var.yres);
++
++ continue;
++ }
++
++ /* Draw next splash frame */
++ splash_convert_to_fb(info->screen_buffer, &info->var,
++ info->fix.line_length, dst_xoff, dst_yoff,
++ bp->data,
++ pp->pic_header->width, pp->pic_header->height);
++ }
++}
+diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h
+new file mode 100644
+index 000000000000..89dc9cca8f0c
+--- /dev/null
++++ b/include/uapi/linux/bootsplash_file.h
+@@ -0,0 +1,118 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (File format)
++ *
++ * Authors:
++ * Max Staudt <mstaudt@suse.de>
++ *
++ * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
++ */
++
++#ifndef __BOOTSPLASH_FILE_H
++#define __BOOTSPLASH_FILE_H
++
++
++#define BOOTSPLASH_VERSION 55561
++
++
++#include <linux/kernel.h>
++#include <linux/types.h>
++
++
++/*
++ * On-disk types
++ *
++ * A splash file consists of:
++ * - One single 'struct splash_file_header'
++ * - An array of 'struct splash_pic_header'
++ * - An array of raw data blocks, each padded to 16 bytes and
++ * preceded by a 'struct splash_blob_header'
++ *
++ * A single-frame splash may look like this:
++ *
++ * +--------------------+
++ * | |
++ * | splash_file_header |
++ * | -> num_blobs = 1 |
++ * | -> num_pics = 1 |
++ * | |
++ * +--------------------+
++ * | |
++ * | splash_pic_header |
++ * | |
++ * +--------------------+
++ * | |
++ * | splash_blob_header |
++ * | -> type = 0 |
++ * | -> picture_id = 0 |
++ * | |
++ * | (raw RGB data) |
++ * | (pad to 16 bytes) |
++ * | |
++ * +--------------------+
++ *
++ * All multi-byte values are stored on disk in the native format
++ * expected by the system the file will be used on.
++ */
++#define BOOTSPLASH_MAGIC_BE "Linux bootsplash"
++#define BOOTSPLASH_MAGIC_LE "hsalpstoob xuniL"
++
++struct splash_file_header {
++ uint8_t id[16]; /* "Linux bootsplash" (no trailing NUL) */
++
++ /* Splash file format version to avoid clashes */
++ uint16_t version;
++
++ /* The background color */
++ uint8_t bg_red;
++ uint8_t bg_green;
++ uint8_t bg_blue;
++ uint8_t bg_reserved;
++
++ /*
++ * Number of pic/blobs so we can allocate memory for internal
++ * structures ahead of time when reading the file
++ */
++ uint16_t num_blobs;
++ uint8_t num_pics;
++
++ uint8_t padding[103];
++} __attribute__((__packed__));
++
++
++struct splash_pic_header {
++ uint16_t width;
++ uint16_t height;
++
++ /*
++ * Number of data packages associated with this picture.
++ * Currently, the only use for more than 1 is for animations.
++ */
++ uint8_t num_blobs;
++
++ uint8_t padding[27];
++} __attribute__((__packed__));
++
++
++struct splash_blob_header {
++ /* Length of the data block in bytes. */
++ uint32_t length;
++
++ /*
++ * Type of the contents.
++ * 0 - Raw RGB data.
++ */
++ uint16_t type;
++
++ /*
++ * Picture this blob is associated with.
++ * Blobs will be added to a picture in the order they are
++ * found in the file.
++ */
++ uint8_t picture_id;
++
++ uint8_t padding[9];
++} __attribute__((__packed__));
++
++#endif
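
The on-disk layout defined in bootsplash_file.h above is simple enough to emit
from a short userspace program. Below is a rough sketch for a little-endian
host (not part of the patch set; the 4x4 size, grey fill, and output name are
arbitrary, and the struct definitions are condensed copies of the ones above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct splash_file_header {
	uint8_t id[16];
	uint16_t version;
	uint8_t bg_red, bg_green, bg_blue, bg_reserved;
	uint16_t num_blobs;
	uint8_t num_pics;
	uint8_t padding[103];
} __attribute__((__packed__));

struct splash_pic_header {
	uint16_t width, height;
	uint8_t num_blobs;
	uint8_t padding[27];
} __attribute__((__packed__));

struct splash_blob_header {
	uint32_t length;
	uint16_t type;
	uint8_t picture_id;
	uint8_t padding[9];
} __attribute__((__packed__));

int main(void)
{
	/* 4x4 pixels * 3 bytes = 48 bytes, already a multiple of 16,
	 * so no trailing pad is needed after the blob data. */
	enum { W = 4, H = 4, LEN = W * H * 3 };
	struct splash_file_header fh = { .version = 55561,
					 .num_blobs = 1, .num_pics = 1 };
	struct splash_pic_header ph = { .width = W, .height = H,
					.num_blobs = 1 };
	struct splash_blob_header bh = { .length = LEN, .type = 0,
					 .picture_id = 0 };
	uint8_t rgb[LEN];
	FILE *f = fopen("bootsplash", "wb");

	if (!f)
		return 1;

	memcpy(fh.id, "hsalpstoob xuniL", 16);	/* BOOTSPLASH_MAGIC_LE */
	memset(rgb, 0x80, sizeof(rgb));		/* one solid grey picture */

	fwrite(&fh, sizeof(fh), 1, f);
	fwrite(&ph, sizeof(ph), 1, f);
	fwrite(&bh, sizeof(bh), 1, f);
	fwrite(rgb, sizeof(rgb), 1, f);
	return fclose(f);
}

bootsplash_load_firmware() pulls such a file in via request_firmware(), so
dropping the result under /lib/firmware and booting with
bootsplash.bootfile=<name> (the module parameter registered above) should be
enough to exercise the load path.
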
diff --git a/sys-kernel/pinephone-sources/files/0002-dts-add-pinetab-dev-old-display-panel.patch b/sys-kernel/pinephone-sources/files/0002-dts-add-pinetab-dev-old-display-panel.patch
new file mode 100644
index 0000000..520d52d
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0002-dts-add-pinetab-dev-old-display-panel.patch
@@ -0,0 +1,61 @@
+From 33212e529708fd480eaf9cc76579f8e7044c0505 Mon Sep 17 00:00:00 2001
+From: Martijn Braam <martijn@brixit.nl>
+Date: Tue, 20 Oct 2020 14:42:01 +0200
+Subject: [PATCH] dts: add pinetab-dev (old display panel)
+
+---
+ arch/arm64/boot/dts/allwinner/Makefile | 1 +
+ .../dts/allwinner/sun50i-a64-pinetab-dev.dts | 29 +++++++++++++++++++
+ 2 files changed, 30 insertions(+)
+ create mode 100644 arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-dev.dts
+
+diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
+index a21cfdd8924d..2936092002b5 100644
+--- a/arch/arm64/boot/dts/allwinner/Makefile
++++ b/arch/arm64/boot/dts/allwinner/Makefile
+@@ -15,6 +15,7 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.0.dtb
+ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.1.dtb
+ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinephone-1.2.dtb
+ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinetab.dtb
++dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pinetab-dev.dtb
+ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-sopine-baseboard.dtb
+ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-teres-i.dtb
+ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-bananapi-m2-plus.dtb
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-dev.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-dev.dts
+new file mode 100644
+index 000000000000..1e287f2fb9f3
+--- /dev/null
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab-dev.dts
+@@ -0,0 +1,29 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Copyright (C) 2019 Icenowy Zheng <icenowy@aosc.io>
++ *
++ */
++
++/dts-v1/;
++
++#include "sun50i-a64-pinetab.dts"
++
++/ {
++ model = "PineTab";
++ compatible = "pine64,pinetab", "allwinner,sun50i-a64";
++};
++
++&dsi {
++ vcc-dsi-supply = <&reg_dldo1>;
++ status = "okay";
++
++ panel@0 {
++ compatible = "feixin,k101-im2ba02";
++ reg = <0>;
++ avdd-supply = <&reg_dc1sw>;
++ dvdd-supply = <&reg_dc1sw>;
++ cvdd-supply = <&reg_ldo_io1>;
++ reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
++ backlight = <&backlight>;
++ };
++};
+--
+2.25.4
+
diff --git a/sys-kernel/pinephone-sources/files/0002-revert-fbcon-remove-no-op-fbcon_set_origin.patch b/sys-kernel/pinephone-sources/files/0002-revert-fbcon-remove-no-op-fbcon_set_origin.patch
new file mode 100644
index 0000000..6491c54
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0002-revert-fbcon-remove-no-op-fbcon_set_origin.patch
@@ -0,0 +1,31 @@
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -163,6 +163,8 @@
+
+ #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
+
++static int fbcon_set_origin(struct vc_data *);
++
+ static int fbcon_cursor_noblink;
+
+ #define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
+@@ -2633,6 +2635,11 @@
+ }
+ }
+
++static int fbcon_set_origin(struct vc_data *vc)
++{
++ return 0;
++}
++
+ void fbcon_suspended(struct fb_info *info)
+ {
+ struct vc_data *vc = NULL;
+@@ -3103,6 +3110,7 @@
+ .con_font_default = fbcon_set_def_font,
+ .con_font_copy = fbcon_copy_font,
+ .con_set_palette = fbcon_set_palette,
++ .con_set_origin = fbcon_set_origin,
+ .con_invert_region = fbcon_invert_region,
+ .con_screen_pos = fbcon_screen_pos,
+ .con_getxy = fbcon_getxy,
diff --git a/sys-kernel/pinephone-sources/files/0002-revert-fbcon-remove-soft-scrollback-code.patch b/sys-kernel/pinephone-sources/files/0002-revert-fbcon-remove-soft-scrollback-code.patch
new file mode 100644
index 0000000..6d15dfd
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0002-revert-fbcon-remove-soft-scrollback-code.patch
@@ -0,0 +1,497 @@
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -122,6 +122,12 @@
+ /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
+ enums. */
+ static int logo_shown = FBCON_LOGO_CANSHOW;
++/* Software scrollback */
++static int fbcon_softback_size = 32768;
++static unsigned long softback_buf, softback_curr;
++static unsigned long softback_in;
++static unsigned long softback_top, softback_end;
++static int softback_lines;
+ /* console mappings */
+ static int first_fb_vc;
+ static int last_fb_vc = MAX_NR_CONSOLES - 1;
+@@ -161,6 +167,8 @@
+
+ static const struct consw fb_con;
+
++#define CM_SOFTBACK (8)
++
+ #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
+
+ static int fbcon_set_origin(struct vc_data *);
+@@ -365,6 +373,18 @@
+ return color;
+ }
+
++static void fbcon_update_softback(struct vc_data *vc)
++{
++ int l = fbcon_softback_size / vc->vc_size_row;
++
++ if (l > 5)
++ softback_end = softback_buf + l * vc->vc_size_row;
++ else
++ /* Smaller scrollback makes no sense, and 0 would screw
++ the operation totally */
++ softback_top = 0;
++}
++
+ static void fb_flashcursor(struct work_struct *work)
+ {
+ struct fb_info *info = container_of(work, struct fb_info, queue);
+@@ -394,7 +414,7 @@
+ c = scr_readw((u16 *) vc->vc_pos);
+ mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
+ CM_ERASE : CM_DRAW;
++ ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ console_unlock();
+ }
+@@ -451,7 +471,13 @@
+ }
+
+ if (!strncmp(options, "scrollback:", 11)) {
++ options += 11;
++ if (*options) {
++ fbcon_softback_size = simple_strtoul(options, &options, 0);
++ if (*options == 'k' || *options == 'K') {
++ fbcon_softback_size *= 1024;
++ }
++ }
+- pr_warn("Ignoring scrollback size option\n");
+ continue;
+ }
+
+@@ -996,6 +1022,31 @@
+
+ set_blitting_type(vc, info);
+
++ if (info->fix.type != FB_TYPE_TEXT) {
++ if (fbcon_softback_size) {
++ if (!softback_buf) {
++ softback_buf =
++ (unsigned long)
++ kvmalloc(fbcon_softback_size,
++ GFP_KERNEL);
++ if (!softback_buf) {
++ fbcon_softback_size = 0;
++ softback_top = 0;
++ }
++ }
++ } else {
++ if (softback_buf) {
++ kvfree((void *) softback_buf);
++ softback_buf = 0;
++ softback_top = 0;
++ }
++ }
++ if (softback_buf)
++ softback_in = softback_top = softback_curr =
++ softback_buf;
++ softback_lines = 0;
++ }
++
+ /* Setup default font */
+ if (!p->fontdata && !vc->vc_font.data) {
+ if (!fontname[0] || !(font = find_font(fontname)))
+@@ -1169,6 +1220,9 @@
+ if (logo)
+ fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
+
++ if (vc == svc && softback_buf)
++ fbcon_update_softback(vc);
++
+ if (ops->rotate_font && ops->rotate_font(info, vc)) {
+ ops->rotate = FB_ROTATE_UR;
+ set_blitting_type(vc, info);
+@@ -1331,6 +1385,7 @@
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
++ int y;
+ int c = scr_readw((u16 *) vc->vc_pos);
+
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+@@ -1344,8 +1399,16 @@
+ fbcon_add_cursor_timer(info);
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
++ if (mode & CM_SOFTBACK) {
++ mode &= ~CM_SOFTBACK;
++ y = softback_lines;
++ } else {
++ if (softback_lines)
++ fbcon_set_origin(vc);
++ y = 0;
++ }
+
++ ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ }
+
+@@ -1416,6 +1479,8 @@
+
+ if (con_is_visible(vc)) {
+ update_screen(vc);
++ if (softback_buf)
++ fbcon_update_softback(vc);
+ }
+ }
+
+@@ -1553,6 +1618,99 @@
+ scrollback_current = 0;
+ }
+
++static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
++ long delta)
++{
++ int count = vc->vc_rows;
++ unsigned short *d, *s;
++ unsigned long n;
++ int line = 0;
++
++ d = (u16 *) softback_curr;
++ if (d == (u16 *) softback_in)
++ d = (u16 *) vc->vc_origin;
++ n = softback_curr + delta * vc->vc_size_row;
++ softback_lines -= delta;
++ if (delta < 0) {
++ if (softback_curr < softback_top && n < softback_buf) {
++ n += softback_end - softback_buf;
++ if (n < softback_top) {
++ softback_lines -=
++ (softback_top - n) / vc->vc_size_row;
++ n = softback_top;
++ }
++ } else if (softback_curr >= softback_top
++ && n < softback_top) {
++ softback_lines -=
++ (softback_top - n) / vc->vc_size_row;
++ n = softback_top;
++ }
++ } else {
++ if (softback_curr > softback_in && n >= softback_end) {
++ n += softback_buf - softback_end;
++ if (n > softback_in) {
++ n = softback_in;
++ softback_lines = 0;
++ }
++ } else if (softback_curr <= softback_in && n > softback_in) {
++ n = softback_in;
++ softback_lines = 0;
++ }
++ }
++ if (n == softback_curr)
++ return;
++ softback_curr = n;
++ s = (u16 *) softback_curr;
++ if (s == (u16 *) softback_in)
++ s = (u16 *) vc->vc_origin;
++ while (count--) {
++ unsigned short *start;
++ unsigned short *le;
++ unsigned short c;
++ int x = 0;
++ unsigned short attr = 1;
++
++ start = s;
++ le = advance_row(s, 1);
++ do {
++ c = scr_readw(s);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (s > start) {
++ fbcon_putcs(vc, start, s - start,
++ line, x);
++ x += s - start;
++ start = s;
++ }
++ }
++ if (c == scr_readw(d)) {
++ if (s > start) {
++ fbcon_putcs(vc, start, s - start,
++ line, x);
++ x += s - start + 1;
++ start = s + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (s < le);
++ if (s > start)
++ fbcon_putcs(vc, start, s - start, line, x);
++ line++;
++ if (d == (u16 *) softback_end)
++ d = (u16 *) softback_buf;
++ if (d == (u16 *) softback_in)
++ d = (u16 *) vc->vc_origin;
++ if (s == (u16 *) softback_end)
++ s = (u16 *) softback_buf;
++ if (s == (u16 *) softback_in)
++ s = (u16 *) vc->vc_origin;
++ }
++}
++
+ static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+ {
+@@ -1692,6 +1850,31 @@
+ }
+ }
+
++static inline void fbcon_softback_note(struct vc_data *vc, int t,
++ int count)
++{
++ unsigned short *p;
++
++ if (vc->vc_num != fg_console)
++ return;
++ p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
++
++ while (count) {
++ scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
++ count--;
++ p = advance_row(p, 1);
++ softback_in += vc->vc_size_row;
++ if (softback_in == softback_end)
++ softback_in = softback_buf;
++ if (softback_in == softback_top) {
++ softback_top += vc->vc_size_row;
++ if (softback_top == softback_end)
++ softback_top = softback_buf;
++ }
++ }
++ softback_curr = softback_in;
++}
++
+ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int count)
+ {
+@@ -1714,6 +1897,8 @@
+ case SM_UP:
+ if (count > vc->vc_rows) /* Maximum realistic size */
+ count = vc->vc_rows;
++ if (softback_top)
++ fbcon_softback_note(vc, t, count);
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (p->scrollmode) {
+@@ -2084,6 +2269,14 @@
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
+
++ if (softback_top) {
++ if (softback_lines)
++ fbcon_set_origin(vc);
++ softback_top = softback_curr = softback_in = softback_buf;
++ softback_lines = 0;
++ fbcon_update_softback(vc);
++ }
++
+ if (logo_shown >= 0) {
+ struct vc_data *conp2 = vc_cons[logo_shown].d;
+
+@@ -2407,6 +2600,9 @@
+ int cnt;
+ char *old_data = NULL;
+
++ if (con_is_visible(vc) && softback_lines)
++ fbcon_set_origin(vc);
++
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+ if (p->userfont)
+ old_data = vc->vc_font.data;
+@@ -2432,6 +2628,8 @@
+ cols /= w;
+ rows /= h;
+ vc_resize(vc, cols, rows);
++ if (con_is_visible(vc) && softback_buf)
++ fbcon_update_softback(vc);
+ } else if (con_is_visible(vc)
+ && vc->vc_mode == KD_TEXT) {
+ fbcon_clear_margins(vc, 0);
+@@ -2590,7 +2788,19 @@
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
++ unsigned long p;
++ int line;
++
++ if (vc->vc_num != fg_console || !softback_lines)
++ return (u16 *) (vc->vc_origin + offset);
++ line = offset / vc->vc_size_row;
++ if (line >= softback_lines)
++ return (u16 *) (vc->vc_origin + offset -
++ softback_lines * vc->vc_size_row);
++ p = softback_curr + offset;
++ if (p >= softback_end)
++ p += softback_buf - softback_end;
++ return (u16 *) p;
+- return (u16 *) (vc->vc_origin + offset);
+ }
+
+ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
+@@ -2604,7 +2814,22 @@
+
+ x = offset % vc->vc_cols;
+ y = offset / vc->vc_cols;
++ if (vc->vc_num == fg_console)
++ y += softback_lines;
+ ret = pos + (vc->vc_cols - x) * 2;
++ } else if (vc->vc_num == fg_console && softback_lines) {
++ unsigned long offset = pos - softback_curr;
++
++ if (pos < softback_curr)
++ offset += softback_end - softback_buf;
++ offset /= 2;
++ x = offset % vc->vc_cols;
++ y = offset / vc->vc_cols;
++ ret = pos + (vc->vc_cols - x) * 2;
++ if (ret == softback_end)
++ ret = softback_buf;
++ if (ret == softback_in)
++ ret = vc->vc_origin;
+ } else {
+ /* Should not happen */
+ x = y = 0;
+@@ -2632,11 +2857,106 @@
+ a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
+ (((a) & 0x0700) << 4);
+ scr_writew(a, p++);
++ if (p == (u16 *) softback_end)
++ p = (u16 *) softback_buf;
++ if (p == (u16 *) softback_in)
++ p = (u16 *) vc->vc_origin;
++ }
++}
++
++static void fbcon_scrolldelta(struct vc_data *vc, int lines)
++{
++ struct fb_info *info = registered_fb[con2fb_map[fg_console]];
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct fbcon_display *disp = &fb_display[fg_console];
++ int offset, limit, scrollback_old;
++
++ if (softback_top) {
++ if (vc->vc_num != fg_console)
++ return;
++ if (vc->vc_mode != KD_TEXT || !lines)
++ return;
++ if (logo_shown >= 0) {
++ struct vc_data *conp2 = vc_cons[logo_shown].d;
++
++ if (conp2->vc_top == logo_lines
++ && conp2->vc_bottom == conp2->vc_rows)
++ conp2->vc_top = 0;
++ if (logo_shown == vc->vc_num) {
++ unsigned long p, q;
++ int i;
++
++ p = softback_in;
++ q = vc->vc_origin +
++ logo_lines * vc->vc_size_row;
++ for (i = 0; i < logo_lines; i++) {
++ if (p == softback_top)
++ break;
++ if (p == softback_buf)
++ p = softback_end;
++ p -= vc->vc_size_row;
++ q -= vc->vc_size_row;
++ scr_memcpyw((u16 *) q, (u16 *) p,
++ vc->vc_size_row);
++ }
++ softback_in = softback_curr = p;
++ update_region(vc, vc->vc_origin,
++ logo_lines * vc->vc_cols);
++ }
++ logo_shown = FBCON_LOGO_CANSHOW;
++ }
++ fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
++ fbcon_redraw_softback(vc, disp, lines);
++ fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
++ return;
+ }
++
++ if (!scrollback_phys_max)
++ return;
++
++ scrollback_old = scrollback_current;
++ scrollback_current -= lines;
++ if (scrollback_current < 0)
++ scrollback_current = 0;
++ else if (scrollback_current > scrollback_max)
++ scrollback_current = scrollback_max;
++ if (scrollback_current == scrollback_old)
++ return;
++
++ if (fbcon_is_inactive(vc, info))
++ return;
++
++ fbcon_cursor(vc, CM_ERASE);
++
++ offset = disp->yscroll - scrollback_current;
++ limit = disp->vrows;
++ switch (disp->scrollmode) {
++ case SCROLL_WRAP_MOVE:
++ info->var.vmode |= FB_VMODE_YWRAP;
++ break;
++ case SCROLL_PAN_MOVE:
++ case SCROLL_PAN_REDRAW:
++ limit -= vc->vc_rows;
++ info->var.vmode &= ~FB_VMODE_YWRAP;
++ break;
++ }
++ if (offset < 0)
++ offset += limit;
++ else if (offset >= limit)
++ offset -= limit;
++
++ ops->var.xoffset = 0;
++ ops->var.yoffset = offset * vc->vc_font.height;
++ ops->update_start(info);
++
++ if (!scrollback_current)
++ fbcon_cursor(vc, CM_DRAW);
+ }
+
+ static int fbcon_set_origin(struct vc_data *vc)
+ {
++ if (softback_lines)
++ fbcon_scrolldelta(vc, softback_lines);
+ return 0;
+ }
+
+@@ -2700,6 +3020,8 @@
+
+ fbcon_set_palette(vc, color_table);
+ update_screen(vc);
++ if (softback_buf)
++ fbcon_update_softback(vc);
+ }
+ }
+
+@@ -3110,6 +3432,7 @@
+ .con_font_default = fbcon_set_def_font,
+ .con_font_copy = fbcon_copy_font,
+ .con_set_palette = fbcon_set_palette,
++ .con_scrolldelta = fbcon_scrolldelta,
+ .con_set_origin = fbcon_set_origin,
+ .con_invert_region = fbcon_invert_region,
+ .con_screen_pos = fbcon_screen_pos,
+@@ -3344,6 +3667,9 @@
+ }
+ #endif
+
++ kvfree((void *)softback_buf);
++ softback_buf = 0UL;
++
+ for_each_registered_fb(i) {
+ int pending = 0;
+
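
For orientation (editor commentary, not part of the patch): the restored scrollback is a ring of whole console rows between softback_buf and softback_end, with softback_in as the write head and softback_top as the oldest preserved row. A minimal userspace sketch of the same wrap-around bookkeeping that fbcon_softback_note() performs, using row indices instead of pointers:

#include <stdio.h>

#define ROWS 4	/* ring capacity in rows; fbcon derives this from the buffer */

int main(void)
{
	unsigned int in = 0, top = 0, row;

	for (row = 0; row < 6; row++) {
		/* store a row at 'in', then advance the write head with wrap */
		in = (in + 1) % ROWS;
		if (in == top)	/* caught up with the oldest row: drop it */
			top = (top + 1) % ROWS;
		printf("after row %u: in=%u top=%u\n", row, in, top);
	}
	return 0;
}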
diff --git a/sys-kernel/pinephone-sources/files/0003-Bluetooth-Fix-attempting-to-set-RPA-timeout-when-unsupported.patch b/sys-kernel/pinephone-sources/files/0003-Bluetooth-Fix-attempting-to-set-RPA-timeout-when-unsupported.patch
new file mode 100644
index 0000000..f6bebdd
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0003-Bluetooth-Fix-attempting-to-set-RPA-timeout-when-unsupported.patch
@@ -0,0 +1,48 @@
+From 4264c74c96e7907b60ee6ed82670317d19ed7ebe Mon Sep 17 00:00:00 2001
+From: Edward Vear
+Date: Tue, 27 Oct 2020 00:02:03 -0700
+Subject: Bluetooth: Fix attempting to set RPA timeout when unsupported
+
+During controller initialization, an LE Set RPA Timeout command is sent
+to the controller if supported. However, the value checked to determine
+if the command is supported is incorrect. Page 1921 of the Bluetooth
+Core Spec v5.2 shows that bit 2 of octet 35 of the Supported_Commands
+field corresponds to the LE Set RPA Timeout command, but currently
+bit 6 of octet 35 is checked. This patch checks the correct value
+instead.
+
+This issue led to the error seen in the following btmon output during
+initialization of an adapter (rtl8761b) and prevented initialization
+from completing.
+
+< HCI Command: LE Set Resolvable Private Address Timeout (0x08|0x002e) plen 2
+ Timeout: 900 seconds
+> HCI Event: Command Complete (0x0e) plen 4
+ LE Set Resolvable Private Address Timeout (0x08|0x002e) ncmd 2
+ Status: Unsupported Remote Feature / Unsupported LMP Feature (0x1a)
+= Close Index: 00:E0:4C:6B:E5:03
+
+The error did not appear when running with this patch.
+
+Signed-off-by: Edward Vear
+Signed-off-by: Marcel Holtmann
+---
+ net/bluetooth/hci_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index be9cdf5dabe5..30a5267af490 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -763,7 +763,7 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
+ hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
+ }
+
+- if (hdev->commands[35] & 0x40) {
++ if (hdev->commands[35] & 0x04) {
+ __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
+
+ /* Set RPA timeout */
+--
+cgit v1.2.3-1-gf6bb5
+
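
As a standalone illustration of the fix (assumed names, plain userspace C): a Supported_Commands reference like "bit 2 of octet 35" translates to masking commands[35] with 1 << 2 = 0x04; the removed 0x40 is bit 6 of the same octet.

#include <stdio.h>

static int supports_cmd(const unsigned char *commands, int octet, int bit)
{
	return (commands[octet] & (1u << bit)) != 0;
}

int main(void)
{
	unsigned char commands[64] = { 0 };

	commands[35] = 0x04;	/* controller advertises bit 2 of octet 35 */

	printf("bit 2: %d, bit 6: %d\n",
	       supports_cmd(commands, 35, 2),	/* 1: the corrected check */
	       supports_cmd(commands, 35, 6));	/* 0: the old, buggy check */
	return 0;
}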
diff --git a/sys-kernel/pinephone-sources/files/0003-Bluetooth-btusb.patch b/sys-kernel/pinephone-sources/files/0003-Bluetooth-btusb.patch
new file mode 100644
index 0000000..14375ec
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0003-Bluetooth-btusb.patch
@@ -0,0 +1,40 @@
+Bluetooth: btusb: Some Qualcomm Bluetooth adapters stop working
+
+This issue appeared in linux-5.10-rc1. I reproduced it on my
+Dell Inspiron 7447 with BT adapter 0cf3:e005; the kernel prints
+"Bluetooth: hci0: don't support firmware rome 0x31010000", and
+someone else reported a similar issue as bugzilla #211571.
+
+This is a regression introduced by commit b40f58b97386
+("Bluetooth: btusb: Add Qualcomm Bluetooth SoC WCN6855 support"). That
+patch assumed that an adapter with a non-zero high ROM version is a
+WCN6855, but many old adapters don't need to load a rampatch or nvm,
+and they do have a non-zero high ROM version.
+
+To fix it, let the driver match rom_version against the
+qca_devices_table first. If no entry matches, check the high ROM
+version: if it is not zero, assume the adapter is ready to work
+without loading rampatch and nvm, as before.
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=211571
+Fixes: b40f58b97386 ("Bluetooth: btusb: Add Qualcomm Bluetooth SoC WCN6855 support")
+Signed-off-by: Hui Wang
+Signed-off-by: Marcel Holtmann
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 03b83aa91277..32161dd40ed6 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -4069,6 +4069,13 @@ static int btusb_setup_qca(struct hci_dev *hdev)
+ info = &qca_devices_table[i];
+ }
+ if (!info) {
++ /* If the rom_version is not matched in the qca_devices_table
++ * and the high ROM version is not zero, assume this chip does not
++ * need to load the rampatch and nvm.
++ */
++ if (ver_rom & ~0xffffU)
++ return 0;
++
+ bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
+ return -ENODEV;
+ }
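
To make the new early return concrete (editor sketch, not driver code): ver_rom & ~0xffffU keeps bits 16..31, so the check passes exactly when the high half-word of the ROM version is non-zero, as with the 0x31010000 from the report.

#include <stdio.h>

int main(void)
{
	unsigned int ver_rom = 0x31010000;	/* value from the bug report */

	if (ver_rom & ~0xffffU)	/* high ROM version (bits 16..31) non-zero */
		printf("high ROM 0x%x: skip rampatch/nvm, use adapter as-is\n",
		       ver_rom >> 16);
	return 0;
}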
diff --git a/sys-kernel/pinephone-sources/files/0003-bootsplash.patch b/sys-kernel/pinephone-sources/files/0003-bootsplash.patch
new file mode 100644
index 0000000..2169537
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0003-bootsplash.patch
@@ -0,0 +1,66 @@
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+index 843c5400fefc..815b007f81ca 100644
+--- a/drivers/video/fbdev/core/bootsplash.c
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -112,6 +112,8 @@ void bootsplash_render_full(struct fb_info *info)
+
+ bootsplash_do_render_pictures(info, splash_state.file);
+
++ bootsplash_do_render_flush(info);
++
+ out:
+ mutex_unlock(&splash_state.data_lock);
+ }
+diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h
+index 71e2a27ac0b8..0acb383aa4e3 100644
+--- a/drivers/video/fbdev/core/bootsplash_internal.h
++++ b/drivers/video/fbdev/core/bootsplash_internal.h
+@@ -89,6 +89,7 @@ void bootsplash_do_render_background(struct fb_info *info,
+ const struct splash_file_priv *fp);
+ void bootsplash_do_render_pictures(struct fb_info *info,
+ const struct splash_file_priv *fp);
++void bootsplash_do_render_flush(struct fb_info *info);
+
+
+ void bootsplash_free_file(struct splash_file_priv *fp);
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+index 2ae36949d0e3..8c09c306ff67 100644
+--- a/drivers/video/fbdev/core/bootsplash_render.c
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -186,3 +186,36 @@ void bootsplash_do_render_pictures(struct fb_info *info,
+ pp->pic_header->width, pp->pic_header->height);
+ }
+ }
++
++
++void bootsplash_do_render_flush(struct fb_info *info)
++{
++ /*
++ * FB drivers using deferred_io (such as Xen) need to sync the
++ * screen after modifying its contents. When the FB is mmap()ed
++ * from userspace, this happens via a dirty pages callback, but
++ * when modifying the FB from the kernel, there is no such thing.
++ *
++ * So let's issue a fake fb_copyarea (copying the FB onto itself)
++ * to trick the FB driver into syncing the screen.
++ *
++ * A few DRM drivers' FB implementations are broken by not using
++ * deferred_io when they really should - we match on the known
++ * bad ones manually for now.
++ */
++ if (info->fbdefio
++ || !strcmp(info->fix.id, "astdrmfb")
++ || !strcmp(info->fix.id, "cirrusdrmfb")
++ || !strcmp(info->fix.id, "mgadrmfb")) {
++ struct fb_copyarea area;
++
++ area.dx = 0;
++ area.dy = 0;
++ area.width = info->var.xres;
++ area.height = info->var.yres;
++ area.sx = 0;
++ area.sy = 0;
++
++ info->fbops->fb_copyarea(info, &area);
++ }
++}
diff --git a/sys-kernel/pinephone-sources/files/0003-qmi_wwan-provide-wrapper-for-reset_resume.patch b/sys-kernel/pinephone-sources/files/0003-qmi_wwan-provide-wrapper-for-reset_resume.patch
new file mode 100644
index 0000000..ae9a456
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0003-qmi_wwan-provide-wrapper-for-reset_resume.patch
@@ -0,0 +1,51 @@
+From 562a6c114ce736db51e41b8c06c408104b79b126 Mon Sep 17 00:00:00 2001
+From: Bhushan Shah
+Date: Wed, 14 Apr 2021 10:29:39 +0530
+Subject: [PATCH 3/5] qmi_wwan: provide wrapper for reset_resume
+
+---
+ drivers/net/usb/qmi_wwan.c | 21 ++++++++++++++++++++-
+ 1 file changed, 20 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index e18ded349d840..cd6ae9696b56a 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -840,6 +840,25 @@ static int qmi_wwan_resume(struct usb_interface *intf)
+ return ret;
+ }
+
++static int qmi_wwan_reset_resume(struct usb_interface *intf)
++{
++ struct usbnet *dev = usb_get_intfdata(intf);
++ struct qmi_wwan_state *info = (void *)&dev->data;
++ int ret = 0;
++ bool callsub = (intf == info->control && info->subdriver &&
++ info->subdriver->reset_resume);
++
++ if (callsub)
++ ret = info->subdriver->reset_resume(intf);
++ if (ret < 0)
++ goto err;
++ ret = usbnet_resume(intf);
++ if (ret < 0 && callsub)
++ info->subdriver->suspend(intf, PMSG_SUSPEND);
++err:
++ return ret;
++}
++
+ static const struct driver_info qmi_wwan_info = {
+ .description = "WWAN/QMI device",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+@@ -1478,7 +1497,7 @@ static struct usb_driver qmi_wwan_driver = {
+ .disconnect = qmi_wwan_disconnect,
+ .suspend = qmi_wwan_suspend,
+ .resume = qmi_wwan_resume,
+- .reset_resume = qmi_wwan_resume,
++ .reset_resume = qmi_wwan_reset_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+ };
+--
+2.31.1
+
diff --git a/sys-kernel/pinephone-sources/files/0003-revert-fbcon-remove-soft-scrollback-code.patch b/sys-kernel/pinephone-sources/files/0003-revert-fbcon-remove-soft-scrollback-code.patch
new file mode 100644
index 0000000..4f97354
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0003-revert-fbcon-remove-soft-scrollback-code.patch
@@ -0,0 +1,500 @@
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -122,6 +122,12 @@
+ /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
+ enums. */
+ static int logo_shown = FBCON_LOGO_CANSHOW;
++/* Software scrollback */
++static int fbcon_softback_size = 32768;
++static unsigned long softback_buf, softback_curr;
++static unsigned long softback_in;
++static unsigned long softback_top, softback_end;
++static int softback_lines;
+ /* console mappings */
+ static int first_fb_vc;
+ static int last_fb_vc = MAX_NR_CONSOLES - 1;
+@@ -161,6 +167,8 @@
+
+ static const struct consw fb_con;
+
++#define CM_SOFTBACK (8)
++
+ #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
+
+ static int fbcon_set_origin(struct vc_data *);
+@@ -365,6 +373,18 @@
+ return color;
+ }
+
++static void fbcon_update_softback(struct vc_data *vc)
++{
++ int l = fbcon_softback_size / vc->vc_size_row;
++
++ if (l > 5)
++ softback_end = softback_buf + l * vc->vc_size_row;
++ else
++ /* Smaller scrollback makes no sense, and 0 would screw
++ the operation totally */
++ softback_top = 0;
++}
++
+ static void fb_flashcursor(struct work_struct *work)
+ {
+ struct fb_info *info = container_of(work, struct fb_info, queue);
+@@ -394,7 +414,7 @@
+ c = scr_readw((u16 *) vc->vc_pos);
+ mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
+ CM_ERASE : CM_DRAW;
++ ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ console_unlock();
+ }
+@@ -451,7 +471,13 @@
+ }
+
+ if (!strncmp(options, "scrollback:", 11)) {
++ options += 11;
++ if (*options) {
++ fbcon_softback_size = simple_strtoul(options, &options, 0);
++ if (*options == 'k' || *options == 'K') {
++ fbcon_softback_size *= 1024;
++ }
++ }
+- pr_warn("Ignoring scrollback size option\n");
+ continue;
+ }
+
+@@ -996,6 +1022,31 @@
+
+ set_blitting_type(vc, info);
+
++ if (info->fix.type != FB_TYPE_TEXT) {
++ if (fbcon_softback_size) {
++ if (!softback_buf) {
++ softback_buf =
++ (unsigned long)
++ kvmalloc(fbcon_softback_size,
++ GFP_KERNEL);
++ if (!softback_buf) {
++ fbcon_softback_size = 0;
++ softback_top = 0;
++ }
++ }
++ } else {
++ if (softback_buf) {
++ kvfree((void *) softback_buf);
++ softback_buf = 0;
++ softback_top = 0;
++ }
++ }
++ if (softback_buf)
++ softback_in = softback_top = softback_curr =
++ softback_buf;
++ softback_lines = 0;
++ }
++
+ /* Setup default font */
+ if (!p->fontdata && !vc->vc_font.data) {
+ if (!fontname[0] || !(font = find_font(fontname)))
+@@ -1169,6 +1220,9 @@
+ if (logo)
+ fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
+
++ if (vc == svc && softback_buf)
++ fbcon_update_softback(vc);
++
+ if (ops->rotate_font && ops->rotate_font(info, vc)) {
+ ops->rotate = FB_ROTATE_UR;
+ set_blitting_type(vc, info);
+@@ -1331,6 +1385,7 @@
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
++ int y;
+ int c = scr_readw((u16 *) vc->vc_pos);
+
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+@@ -1334,11 +1389,19 @@ static void fbcon_cursor(struct vc_data
+ fbcon_add_cursor_timer(info);
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
++ if (mode & CM_SOFTBACK) {
++ mode &= ~CM_SOFTBACK;
++ y = softback_lines;
++ } else {
++ if (softback_lines)
++ fbcon_set_origin(vc);
++ y = 0;
++ }
+
+ if (!ops->cursor)
+ return;
+
+- ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
++ ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ }
+
+@@ -1416,6 +1479,8 @@
+
+ if (con_is_visible(vc)) {
+ update_screen(vc);
++ if (softback_buf)
++ fbcon_update_softback(vc);
+ }
+ }
+
+@@ -1553,6 +1618,99 @@
+ scrollback_current = 0;
+ }
+
++static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
++ long delta)
++{
++ int count = vc->vc_rows;
++ unsigned short *d, *s;
++ unsigned long n;
++ int line = 0;
++
++ d = (u16 *) softback_curr;
++ if (d == (u16 *) softback_in)
++ d = (u16 *) vc->vc_origin;
++ n = softback_curr + delta * vc->vc_size_row;
++ softback_lines -= delta;
++ if (delta < 0) {
++ if (softback_curr < softback_top && n < softback_buf) {
++ n += softback_end - softback_buf;
++ if (n < softback_top) {
++ softback_lines -=
++ (softback_top - n) / vc->vc_size_row;
++ n = softback_top;
++ }
++ } else if (softback_curr >= softback_top
++ && n < softback_top) {
++ softback_lines -=
++ (softback_top - n) / vc->vc_size_row;
++ n = softback_top;
++ }
++ } else {
++ if (softback_curr > softback_in && n >= softback_end) {
++ n += softback_buf - softback_end;
++ if (n > softback_in) {
++ n = softback_in;
++ softback_lines = 0;
++ }
++ } else if (softback_curr <= softback_in && n > softback_in) {
++ n = softback_in;
++ softback_lines = 0;
++ }
++ }
++ if (n == softback_curr)
++ return;
++ softback_curr = n;
++ s = (u16 *) softback_curr;
++ if (s == (u16 *) softback_in)
++ s = (u16 *) vc->vc_origin;
++ while (count--) {
++ unsigned short *start;
++ unsigned short *le;
++ unsigned short c;
++ int x = 0;
++ unsigned short attr = 1;
++
++ start = s;
++ le = advance_row(s, 1);
++ do {
++ c = scr_readw(s);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (s > start) {
++ fbcon_putcs(vc, start, s - start,
++ line, x);
++ x += s - start;
++ start = s;
++ }
++ }
++ if (c == scr_readw(d)) {
++ if (s > start) {
++ fbcon_putcs(vc, start, s - start,
++ line, x);
++ x += s - start + 1;
++ start = s + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (s < le);
++ if (s > start)
++ fbcon_putcs(vc, start, s - start, line, x);
++ line++;
++ if (d == (u16 *) softback_end)
++ d = (u16 *) softback_buf;
++ if (d == (u16 *) softback_in)
++ d = (u16 *) vc->vc_origin;
++ if (s == (u16 *) softback_end)
++ s = (u16 *) softback_buf;
++ if (s == (u16 *) softback_in)
++ s = (u16 *) vc->vc_origin;
++ }
++}
++
+ static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+ {
+@@ -1692,6 +1850,31 @@
+ }
+ }
+
++static inline void fbcon_softback_note(struct vc_data *vc, int t,
++ int count)
++{
++ unsigned short *p;
++
++ if (vc->vc_num != fg_console)
++ return;
++ p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
++
++ while (count) {
++ scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
++ count--;
++ p = advance_row(p, 1);
++ softback_in += vc->vc_size_row;
++ if (softback_in == softback_end)
++ softback_in = softback_buf;
++ if (softback_in == softback_top) {
++ softback_top += vc->vc_size_row;
++ if (softback_top == softback_end)
++ softback_top = softback_buf;
++ }
++ }
++ softback_curr = softback_in;
++}
++
+ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int count)
+ {
+@@ -1714,6 +1897,8 @@
+ case SM_UP:
+ if (count > vc->vc_rows) /* Maximum realistic size */
+ count = vc->vc_rows;
++ if (softback_top)
++ fbcon_softback_note(vc, t, count);
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (p->scrollmode) {
+@@ -2084,6 +2269,14 @@
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
+
++ if (softback_top) {
++ if (softback_lines)
++ fbcon_set_origin(vc);
++ softback_top = softback_curr = softback_in = softback_buf;
++ softback_lines = 0;
++ fbcon_update_softback(vc);
++ }
++
+ if (logo_shown >= 0) {
+ struct vc_data *conp2 = vc_cons[logo_shown].d;
+
+@@ -2407,6 +2600,9 @@
+ int cnt;
+ char *old_data = NULL;
+
++ if (con_is_visible(vc) && softback_lines)
++ fbcon_set_origin(vc);
++
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+ if (p->userfont)
+ old_data = vc->vc_font.data;
+@@ -2432,6 +2628,8 @@
+ cols /= w;
+ rows /= h;
+ vc_resize(vc, cols, rows);
++ if (con_is_visible(vc) && softback_buf)
++ fbcon_update_softback(vc);
+ } else if (con_is_visible(vc)
+ && vc->vc_mode == KD_TEXT) {
+ fbcon_clear_margins(vc, 0);
+@@ -2590,7 +2788,19 @@
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
++ unsigned long p;
++ int line;
++
++ if (vc->vc_num != fg_console || !softback_lines)
++ return (u16 *) (vc->vc_origin + offset);
++ line = offset / vc->vc_size_row;
++ if (line >= softback_lines)
++ return (u16 *) (vc->vc_origin + offset -
++ softback_lines * vc->vc_size_row);
++ p = softback_curr + offset;
++ if (p >= softback_end)
++ p += softback_buf - softback_end;
++ return (u16 *) p;
+- return (u16 *) (vc->vc_origin + offset);
+ }
+
+ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
+@@ -2604,7 +2814,22 @@
+
+ x = offset % vc->vc_cols;
+ y = offset / vc->vc_cols;
++ if (vc->vc_num == fg_console)
++ y += softback_lines;
+ ret = pos + (vc->vc_cols - x) * 2;
++ } else if (vc->vc_num == fg_console && softback_lines) {
++ unsigned long offset = pos - softback_curr;
++
++ if (pos < softback_curr)
++ offset += softback_end - softback_buf;
++ offset /= 2;
++ x = offset % vc->vc_cols;
++ y = offset / vc->vc_cols;
++ ret = pos + (vc->vc_cols - x) * 2;
++ if (ret == softback_end)
++ ret = softback_buf;
++ if (ret == softback_in)
++ ret = vc->vc_origin;
+ } else {
+ /* Should not happen */
+ x = y = 0;
+@@ -2632,11 +2857,106 @@
+ a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
+ (((a) & 0x0700) << 4);
+ scr_writew(a, p++);
++ if (p == (u16 *) softback_end)
++ p = (u16 *) softback_buf;
++ if (p == (u16 *) softback_in)
++ p = (u16 *) vc->vc_origin;
++ }
++}
++
++static void fbcon_scrolldelta(struct vc_data *vc, int lines)
++{
++ struct fb_info *info = registered_fb[con2fb_map[fg_console]];
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct fbcon_display *disp = &fb_display[fg_console];
++ int offset, limit, scrollback_old;
++
++ if (softback_top) {
++ if (vc->vc_num != fg_console)
++ return;
++ if (vc->vc_mode != KD_TEXT || !lines)
++ return;
++ if (logo_shown >= 0) {
++ struct vc_data *conp2 = vc_cons[logo_shown].d;
++
++ if (conp2->vc_top == logo_lines
++ && conp2->vc_bottom == conp2->vc_rows)
++ conp2->vc_top = 0;
++ if (logo_shown == vc->vc_num) {
++ unsigned long p, q;
++ int i;
++
++ p = softback_in;
++ q = vc->vc_origin +
++ logo_lines * vc->vc_size_row;
++ for (i = 0; i < logo_lines; i++) {
++ if (p == softback_top)
++ break;
++ if (p == softback_buf)
++ p = softback_end;
++ p -= vc->vc_size_row;
++ q -= vc->vc_size_row;
++ scr_memcpyw((u16 *) q, (u16 *) p,
++ vc->vc_size_row);
++ }
++ softback_in = softback_curr = p;
++ update_region(vc, vc->vc_origin,
++ logo_lines * vc->vc_cols);
++ }
++ logo_shown = FBCON_LOGO_CANSHOW;
++ }
++ fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
++ fbcon_redraw_softback(vc, disp, lines);
++ fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
++ return;
+ }
++
++ if (!scrollback_phys_max)
++ return;
++
++ scrollback_old = scrollback_current;
++ scrollback_current -= lines;
++ if (scrollback_current < 0)
++ scrollback_current = 0;
++ else if (scrollback_current > scrollback_max)
++ scrollback_current = scrollback_max;
++ if (scrollback_current == scrollback_old)
++ return;
++
++ if (fbcon_is_inactive(vc, info))
++ return;
++
++ fbcon_cursor(vc, CM_ERASE);
++
++ offset = disp->yscroll - scrollback_current;
++ limit = disp->vrows;
++ switch (disp->scrollmode) {
++ case SCROLL_WRAP_MOVE:
++ info->var.vmode |= FB_VMODE_YWRAP;
++ break;
++ case SCROLL_PAN_MOVE:
++ case SCROLL_PAN_REDRAW:
++ limit -= vc->vc_rows;
++ info->var.vmode &= ~FB_VMODE_YWRAP;
++ break;
++ }
++ if (offset < 0)
++ offset += limit;
++ else if (offset >= limit)
++ offset -= limit;
++
++ ops->var.xoffset = 0;
++ ops->var.yoffset = offset * vc->vc_font.height;
++ ops->update_start(info);
++
++ if (!scrollback_current)
++ fbcon_cursor(vc, CM_DRAW);
+ }
+
+ static int fbcon_set_origin(struct vc_data *vc)
+ {
++ if (softback_lines)
++ fbcon_scrolldelta(vc, softback_lines);
+ return 0;
+ }
+
+@@ -2700,6 +3020,8 @@
+
+ fbcon_set_palette(vc, color_table);
+ update_screen(vc);
++ if (softback_buf)
++ fbcon_update_softback(vc);
+ }
+ }
+
+@@ -3110,6 +3432,7 @@
+ .con_font_default = fbcon_set_def_font,
+ .con_font_copy = fbcon_copy_font,
+ .con_set_palette = fbcon_set_palette,
++ .con_scrolldelta = fbcon_scrolldelta,
+ .con_set_origin = fbcon_set_origin,
+ .con_invert_region = fbcon_invert_region,
+ .con_screen_pos = fbcon_screen_pos,
+@@ -3344,6 +3667,9 @@
+ }
+ #endif
+
++ kvfree((void *)softback_buf);
++ softback_buf = 0UL;
++
+ for_each_registered_fb(i) {
+ int pending = 0;
+
diff --git a/sys-kernel/pinephone-sources/files/0004-bootsplash.patch b/sys-kernel/pinephone-sources/files/0004-bootsplash.patch
new file mode 100644
index 0000000..7eb54af
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0004-bootsplash.patch
@@ -0,0 +1,215 @@
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+index 8c09c306ff67..07e3a4eab811 100644
+--- a/drivers/video/fbdev/core/bootsplash_render.c
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -155,6 +155,7 @@ void bootsplash_do_render_pictures(struct fb_info *info,
+ for (i = 0; i < fp->header->num_pics; i++) {
+ struct splash_blob_priv *bp;
+ struct splash_pic_priv *pp = &fp->pics[i];
++ const struct splash_pic_header *ph = pp->pic_header;
+ long dst_xoff, dst_yoff;
+
+ if (pp->blobs_loaded < 1)
+@@ -165,8 +166,139 @@ void bootsplash_do_render_pictures(struct fb_info *info,
+ if (!bp || bp->blob_header->type != 0)
+ continue;
+
+- dst_xoff = (info->var.xres - pp->pic_header->width) / 2;
+- dst_yoff = (info->var.yres - pp->pic_header->height) / 2;
++ switch (ph->position) {
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_LEFT:
++ dst_xoff = 0;
++ dst_yoff = 0;
++
++ dst_xoff += ph->position_offset;
++ dst_yoff += ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = 0;
++
++ dst_yoff += ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_RIGHT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_yoff = 0;
++
++ dst_xoff -= ph->position_offset;
++ dst_yoff += ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_RIGHT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff -= ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM_RIGHT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++
++ dst_xoff -= ph->position_offset;
++ dst_yoff -= ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++
++ dst_yoff -= ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM_LEFT:
++ dst_xoff = 0 + ph->position_offset;
++ dst_yoff = info->var.yres - pp->pic_header->height
++ - ph->position_offset;
++ break;
++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_LEFT:
++ dst_xoff = 0;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff += ph->position_offset;
++ break;
++
++ case SPLASH_CORNER_TOP_LEFT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff -= ph->position_offset;
++ dst_yoff -= ph->position_offset;
++ break;
++ case SPLASH_CORNER_TOP:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_yoff -= ph->position_offset;
++ break;
++ case SPLASH_CORNER_TOP_RIGHT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff += ph->position_offset;
++ dst_yoff -= ph->position_offset;
++ break;
++ case SPLASH_CORNER_RIGHT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff += ph->position_offset;
++ break;
++ case SPLASH_CORNER_BOTTOM_RIGHT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff += ph->position_offset;
++ dst_yoff += ph->position_offset;
++ break;
++ case SPLASH_CORNER_BOTTOM:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_yoff += ph->position_offset;
++ break;
++ case SPLASH_CORNER_BOTTOM_LEFT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff -= ph->position_offset;
++ dst_yoff += ph->position_offset;
++ break;
++ case SPLASH_CORNER_LEFT:
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++
++ dst_xoff -= ph->position_offset;
++ break;
++
++ default:
++ /* As a fallback, center the picture. */
++ dst_xoff = info->var.xres - pp->pic_header->width;
++ dst_xoff /= 2;
++ dst_yoff = info->var.yres - pp->pic_header->height;
++ dst_yoff /= 2;
++ break;
++ }
+
+ if (dst_xoff < 0
+ || dst_yoff < 0
+diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h
+index 89dc9cca8f0c..71cedcc68933 100644
+--- a/include/uapi/linux/bootsplash_file.h
++++ b/include/uapi/linux/bootsplash_file.h
+@@ -91,7 +91,32 @@ struct splash_pic_header {
+ */
+ uint8_t num_blobs;
+
+- uint8_t padding[27];
++ /*
++ * Corner to move the picture to / from.
++ * 0x00 - Top left
++ * 0x01 - Top
++ * 0x02 - Top right
++ * 0x03 - Right
++ * 0x04 - Bottom right
++ * 0x05 - Bottom
++ * 0x06 - Bottom left
++ * 0x07 - Left
++ *
++ * Flags:
++ * 0x10 - Calculate offset from the corner towards the center,
++ * rather than from the center towards the corner
++ */
++ uint8_t position;
++
++ /*
++ * Pixel offset from the selected position.
++ * Example: If the picture is in the top right corner, it will
++ * be placed position_offset pixels from the top and
++ * position_offset pixels from the right margin.
++ */
++ uint16_t position_offset;
++
++ uint8_t padding[24];
+ } __attribute__((__packed__));
+
+
+@@ -115,4 +140,22 @@ struct splash_blob_header {
+ uint8_t padding[9];
+ } __attribute__((__packed__));
+
++
++
++
++/*
++ * Enums for on-disk types
++ */
++enum splash_position {
++ SPLASH_CORNER_TOP_LEFT = 0,
++ SPLASH_CORNER_TOP = 1,
++ SPLASH_CORNER_TOP_RIGHT = 2,
++ SPLASH_CORNER_RIGHT = 3,
++ SPLASH_CORNER_BOTTOM_RIGHT = 4,
++ SPLASH_CORNER_BOTTOM = 5,
++ SPLASH_CORNER_BOTTOM_LEFT = 6,
++ SPLASH_CORNER_LEFT = 7,
++ SPLASH_POS_FLAG_CORNER = 0x10,
++};
++
+ #endif
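
Worked example for one branch of the switch above (editor sketch, not patch code): with SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_RIGHT and position_offset 16, a 100-pixel-wide picture on a 1280-pixel-wide framebuffer lands 16 pixels from the top and 16 pixels from the right edge.

#include <stdio.h>

int main(void)
{
	long xres = 1280, width = 100, off = 16;
	long dst_xoff = xres - width - off;	/* inset from the right edge */
	long dst_yoff = 0 + off;		/* inset from the top edge */

	printf("top-right placement: (%ld, %ld)\n", dst_xoff, dst_yoff);
	return 0;	/* prints (1164, 16) */
}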
diff --git a/sys-kernel/pinephone-sources/files/0004-cdc-wdm-provide-wrapper-for-reset_resume.patch b/sys-kernel/pinephone-sources/files/0004-cdc-wdm-provide-wrapper-for-reset_resume.patch
new file mode 100644
index 0000000..b99092f
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0004-cdc-wdm-provide-wrapper-for-reset_resume.patch
@@ -0,0 +1,65 @@
+From 3af7a8b44f265a482c8297b420085cfb53725136 Mon Sep 17 00:00:00 2001
+From: Bhushan Shah
+Date: Wed, 14 Apr 2021 10:29:57 +0530
+Subject: [PATCH 4/5] cdc-wdm: provide wrapper for reset_resume
+
+---
+ drivers/usb/class/cdc-wdm.c | 35 ++++++++++++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 508b1c3f8b731..2b9355ed4a2ad 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -1119,6 +1119,39 @@ static int wdm_resume(struct usb_interface *intf)
+
+ return rv;
+ }
++
++static int wdm_reset_resume(struct usb_interface *intf)
++{
++ struct wdm_device *desc = wdm_find_device(intf);
++ int rv;
++
++ dev_dbg(&desc->intf->dev, "wdm%d_reset_resume\n", intf->minor);
++
++ spin_lock_irq(&desc->iuspin);
++ set_bit(WDM_RESETTING, &desc->flags);
++ set_bit(WDM_READ, &desc->flags);
++ clear_bit(WDM_IN_USE, &desc->flags);
++
++ desc->rerr = -EINTR;
++
++ spin_unlock_irq(&desc->iuspin);
++ wake_up_all(&desc->wait);
++ mutex_lock(&desc->rlock);
++ mutex_lock(&desc->wlock);
++ poison_urbs(desc);
++ cancel_work_sync(&desc->rxwork);
++ cancel_work_sync(&desc->service_outs_intr);
++
++ clear_bit(WDM_SUSPENDING, &desc->flags);
++ clear_bit(WDM_OVERFLOW, &desc->flags);
++ clear_bit(WDM_RESETTING, &desc->flags);
++
++ rv = recover_from_urb_loss(desc);
++ mutex_unlock(&desc->wlock);
++ mutex_unlock(&desc->rlock);
++
++ return rv;
++}
+ #endif
+
+ static int wdm_pre_reset(struct usb_interface *intf)
+@@ -1166,7 +1199,7 @@ static struct usb_driver wdm_driver = {
+ #ifdef CONFIG_PM
+ .suspend = wdm_suspend,
+ .resume = wdm_resume,
+- .reset_resume = wdm_resume,
++ .reset_resume = wdm_reset_resume,
+ #endif
+ .pre_reset = wdm_pre_reset,
+ .post_reset = wdm_post_reset,
+--
+2.31.1
+
diff --git a/sys-kernel/pinephone-sources/files/0004-efivarfs-revert-fix-memory-leak-in-efivarfs_create.patch b/sys-kernel/pinephone-sources/files/0004-efivarfs-revert-fix-memory-leak-in-efivarfs_create.patch
new file mode 100644
index 0000000..6589b85
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0004-efivarfs-revert-fix-memory-leak-in-efivarfs_create.patch
@@ -0,0 +1,58 @@
+From a163474e9b86c2c25f20733385d8b1d6de492a7f Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel
+Date: Wed, 25 Nov 2020 08:45:55 +0100
+Subject: efivarfs: revert "fix memory leak in efivarfs_create()"
+
+The memory leak addressed by commit fe5186cf12e3 is a false positive:
+all allocations are recorded in a linked list and freed when the
+filesystem is unmounted. The kfree() added by that commit therefore
+causes double frees and, as reported by David, crashes if SLUB is
+configured to self-destruct when double frees occur.
+
+So drop the redundant kfree() again, and instead, mark the offending
+pointer variable so the allocation is ignored by kmemleak.
+
+Cc: Vamshi K Sthambamkadi
+Fixes: fe5186cf12e3 ("efivarfs: fix memory leak in efivarfs_create()")
+Reported-by: David Laight
+Signed-off-by: Ard Biesheuvel
+---
+ fs/efivarfs/inode.c | 2 ++
+ fs/efivarfs/super.c | 1 -
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 96c0c86f3fff..0297ad95eb5c 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -7,6 +7,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/ctype.h>
++#include <linux/kmemleak.h>
+ #include <linux/slab.h>
+ #include <linux/uuid.h>
+
+@@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ var->var.VariableName[i] = '\0';
+
+ inode->i_private = var;
++ kmemleak_ignore(var);
+
+ err = efivar_entry_add(var, &efivarfs_list);
+ if (err)
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index f943fd0b0699..15880a68faad 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -21,7 +21,6 @@ LIST_HEAD(efivarfs_list);
+ static void efivarfs_evict_inode(struct inode *inode)
+ {
+ clear_inode(inode);
+- kfree(inode->i_private);
+ }
+
+ static const struct super_operations efivarfs_ops = {
+--
+cgit v1.2.3-1-gf6bb5
+
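
The pattern the commit describes, as a hedged kernel-side sketch (assumed struct and function names, not the actual efivarfs code): when an object stays reachable through a container that kmemleak does not track, the fix is kmemleak_ignore() on the pointer, not a second kfree() racing the teardown path.

#include <linux/errno.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/slab.h>

struct entry {
	struct list_head node;
	int payload;
};

static LIST_HEAD(entries);	/* freed in one pass at teardown */

static int add_entry(int payload)
{
	struct entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->payload = payload;
	kmemleak_ignore(e);	/* owned by 'entries', not leaked */
	list_add(&e->node, &entries);
	return 0;
}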
diff --git a/sys-kernel/pinephone-sources/files/0005-bootsplash.patch b/sys-kernel/pinephone-sources/files/0005-bootsplash.patch
new file mode 100644
index 0000000..2785c5e
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0005-bootsplash.patch
@@ -0,0 +1,327 @@
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+index 815b007f81ca..c8642142cfea 100644
+--- a/drivers/video/fbdev/core/bootsplash.c
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -53,6 +53,14 @@ static void splash_callback_redraw_vc(struct work_struct *ignored)
+ console_unlock();
+ }
+
++static void splash_callback_animation(struct work_struct *ignored)
++{
++ if (bootsplash_would_render_now()) {
++ /* This will also re-schedule this delayed worker */
++ splash_callback_redraw_vc(ignored);
++ }
++}
++
+
+ static bool is_fb_compatible(const struct fb_info *info)
+ {
+@@ -103,17 +111,44 @@ static bool is_fb_compatible(const struct fb_info *info)
+ */
+ void bootsplash_render_full(struct fb_info *info)
+ {
++ bool is_update = false;
++
+ mutex_lock(&splash_state.data_lock);
+
+- if (!is_fb_compatible(info))
+- goto out;
++ /*
++ * If we've painted on this FB recently, we don't have to do
++ * the sanity checks and background drawing again.
++ */
++ if (splash_state.splash_fb == info)
++ is_update = true;
++
++
++ if (!is_update) {
++ /* Check whether we actually support this FB. */
++ splash_state.splash_fb = NULL;
++
++ if (!is_fb_compatible(info))
++ goto out;
++
++ /* Draw the background only once */
++ bootsplash_do_render_background(info, splash_state.file);
+
+- bootsplash_do_render_background(info, splash_state.file);
++ /* Mark this FB as last seen */
++ splash_state.splash_fb = info;
++ }
+
+- bootsplash_do_render_pictures(info, splash_state.file);
++ bootsplash_do_render_pictures(info, splash_state.file, is_update);
+
+ bootsplash_do_render_flush(info);
+
++ bootsplash_do_step_animations(splash_state.file);
++
++ /* Schedule update for animated splash screens */
++ if (splash_state.file->frame_ms > 0)
++ schedule_delayed_work(&splash_state.dwork_animation,
++ msecs_to_jiffies(
++ splash_state.file->frame_ms));
++
+ out:
+ mutex_unlock(&splash_state.data_lock);
+ }
+@@ -169,8 +204,14 @@ void bootsplash_enable(void)
+
+ was_enabled = test_and_set_bit(0, &splash_state.enabled);
+
+- if (!was_enabled)
++ if (!was_enabled) {
++ /* Force a full redraw when the splash is re-activated */
++ mutex_lock(&splash_state.data_lock);
++ splash_state.splash_fb = NULL;
++ mutex_unlock(&splash_state.data_lock);
++
+ schedule_work(&splash_state.work_redraw_vc);
++ }
+ }
+
+
+@@ -227,6 +268,14 @@ ATTRIBUTE_GROUPS(splash_dev);
+ */
+ static int splash_resume(struct device *device)
+ {
++ /*
++ * Force full redraw on resume since we've probably lost the
++ * framebuffer's contents meanwhile
++ */
++ mutex_lock(&splash_state.data_lock);
++ splash_state.splash_fb = NULL;
++ mutex_unlock(&splash_state.data_lock);
++
+ if (bootsplash_would_render_now())
+ schedule_work(&splash_state.work_redraw_vc);
+
+@@ -235,6 +284,7 @@ static int splash_resume(struct device *device)
+
+ static int splash_suspend(struct device *device)
+ {
++ cancel_delayed_work_sync(&splash_state.dwork_animation);
+ cancel_work_sync(&splash_state.work_redraw_vc);
+
+ return 0;
+@@ -296,6 +346,8 @@ void bootsplash_init(void)
+ set_bit(0, &splash_state.enabled);
+
+ INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc);
++ INIT_DELAYED_WORK(&splash_state.dwork_animation,
++ splash_callback_animation);
+
+
+ if (!splash_state.bootfile || !strlen(splash_state.bootfile))
+diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h
+index 0acb383aa4e3..b3a74835d90f 100644
+--- a/drivers/video/fbdev/core/bootsplash_internal.h
++++ b/drivers/video/fbdev/core/bootsplash_internal.h
+@@ -37,6 +37,8 @@ struct splash_pic_priv {
+
+ struct splash_blob_priv *blobs;
+ u16 blobs_loaded;
++
++ u16 anim_nextframe;
+ };
+
+
+@@ -45,6 +47,12 @@ struct splash_file_priv {
+ const struct splash_file_header *header;
+
+ struct splash_pic_priv *pics;
++
++ /*
++ * A local copy of the frame delay in the header.
++ * We modify it to keep the code simple.
++ */
++ u16 frame_ms;
+ };
+
+
+@@ -71,6 +79,7 @@ struct splash_priv {
+ struct platform_device *splash_device;
+
+ struct work_struct work_redraw_vc;
++ struct delayed_work dwork_animation;
+
+ /* Splash data structures including lock for everything below */
+ struct mutex data_lock;
+@@ -88,8 +97,10 @@ struct splash_priv {
+ void bootsplash_do_render_background(struct fb_info *info,
+ const struct splash_file_priv *fp);
+ void bootsplash_do_render_pictures(struct fb_info *info,
+- const struct splash_file_priv *fp);
++ const struct splash_file_priv *fp,
++ bool is_update);
+ void bootsplash_do_render_flush(struct fb_info *info);
++void bootsplash_do_step_animations(struct splash_file_priv *fp);
+
+
+ void bootsplash_free_file(struct splash_file_priv *fp);
+diff --git a/drivers/video/fbdev/core/bootsplash_load.c b/drivers/video/fbdev/core/bootsplash_load.c
+index fd807571ab7d..1f661b2d4cc9 100644
+--- a/drivers/video/fbdev/core/bootsplash_load.c
++++ b/drivers/video/fbdev/core/bootsplash_load.c
+@@ -71,6 +71,7 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device,
+ {
+ const struct firmware *fw;
+ struct splash_file_priv *fp;
++ bool have_anim = false;
+ unsigned int i;
+ const u8 *walker;
+
+@@ -135,6 +136,13 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device,
+ goto err;
+ }
+
++ if (ph->anim_type > SPLASH_ANIM_LOOP_FORWARD) {
++ pr_warn("Picture %u: Unsupported animation type %u.\n",
++ i, ph->anim_type);
++
++ ph->anim_type = SPLASH_ANIM_NONE;
++ }
++
+ pp->pic_header = ph;
+ pp->blobs = vzalloc(ph->num_blobs
+ * sizeof(struct splash_blob_priv));
+@@ -202,6 +210,7 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device,
+ /* Walk over pictures and ensure all blob slots are filled */
+ for (i = 0; i < fp->header->num_pics; i++) {
+ struct splash_pic_priv *pp = &fp->pics[i];
++ const struct splash_pic_header *ph = pp->pic_header;
+
+ if (pp->blobs_loaded != pp->pic_header->num_blobs) {
+ pr_err("Picture %u doesn't have all blob slots filled.\n",
+@@ -209,8 +218,20 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device,
+
+ goto err;
+ }
++
++ if (ph->anim_type
++ && ph->num_blobs > 1
++ && ph->anim_loop < pp->blobs_loaded)
++ have_anim = true;
+ }
+
++ if (!have_anim)
++ /* Disable animation timer if there is nothing to animate */
++ fp->frame_ms = 0;
++ else
++ /* Enforce minimum delay between frames */
++ fp->frame_ms = max((u16)20, fp->header->frame_ms);
++
+ pr_info("Loaded (%ld bytes, %u pics, %u blobs).\n",
+ fw->size,
+ fp->header->num_pics,
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+index 07e3a4eab811..76033606ca8a 100644
+--- a/drivers/video/fbdev/core/bootsplash_render.c
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -148,7 +148,8 @@ void bootsplash_do_render_background(struct fb_info *info,
+
+
+ void bootsplash_do_render_pictures(struct fb_info *info,
+- const struct splash_file_priv *fp)
++ const struct splash_file_priv *fp,
++ bool is_update)
+ {
+ unsigned int i;
+
+@@ -161,7 +162,11 @@ void bootsplash_do_render_pictures(struct fb_info *info,
+ if (pp->blobs_loaded < 1)
+ continue;
+
+- bp = &pp->blobs[0];
++ /* Skip static pictures when refreshing animations */
++ if (ph->anim_type == SPLASH_ANIM_NONE && is_update)
++ continue;
++
++ bp = &pp->blobs[pp->anim_nextframe];
+
+ if (!bp || bp->blob_header->type != 0)
+ continue;
+@@ -351,3 +356,24 @@ void bootsplash_do_render_flush(struct fb_info *info)
+ info->fbops->fb_copyarea(info, &area);
+ }
+ }
++
++
++void bootsplash_do_step_animations(struct splash_file_priv *fp)
++{
++ unsigned int i;
++
++ /* Step every animation once */
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++
++ if (pp->blobs_loaded < 2
++ || pp->pic_header->anim_loop > pp->blobs_loaded)
++ continue;
++
++ if (pp->pic_header->anim_type == SPLASH_ANIM_LOOP_FORWARD) {
++ pp->anim_nextframe++;
++ if (pp->anim_nextframe >= pp->pic_header->num_blobs)
++ pp->anim_nextframe = pp->pic_header->anim_loop;
++ }
++ }
++}
+diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h
+index 71cedcc68933..b3af0a3c6487 100644
+--- a/include/uapi/linux/bootsplash_file.h
++++ b/include/uapi/linux/bootsplash_file.h
+@@ -77,7 +77,17 @@ struct splash_file_header {
+ uint16_t num_blobs;
+ uint8_t num_pics;
+
+- uint8_t padding[103];
++ uint8_t unused_1;
++
++ /*
++ * Milliseconds to wait before painting the next frame in
++ * an animation.
++ * This is actually a minimum, as the system is allowed to
++ * stall for longer between frames.
++ */
++ uint16_t frame_ms;
++
++ uint8_t padding[100];
+ } __attribute__((__packed__));
+
+
+@@ -116,7 +126,23 @@ struct splash_pic_header {
+ */
+ uint16_t position_offset;
+
+- uint8_t padding[24];
++ /*
++ * Animation type.
++ * 0 - off
++ * 1 - forward loop
++ */
++ uint8_t anim_type;
++
++ /*
++ * Animation loop point.
++ * Actual meaning depends on animation type:
++ * Type 0 - Unused
++ * 1 - Frame at which to restart the forward loop
++ * (allowing for "intro" frames)
++ */
++ uint8_t anim_loop;
++
++ uint8_t padding[22];
+ } __attribute__((__packed__));
+
+
+@@ -158,4 +184,9 @@ enum splash_position {
+ SPLASH_POS_FLAG_CORNER = 0x10,
+ };
+
++enum splash_anim_type {
++ SPLASH_ANIM_NONE = 0,
++ SPLASH_ANIM_LOOP_FORWARD = 1,
++};
++
+ #endif
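
A small trace of the stepping rule in bootsplash_do_step_animations() (standalone editor sketch): with num_blobs = 5 and anim_loop = 2, frames 0 and 1 play once as an intro, and frames 2..4 then loop forever.

#include <stdio.h>

int main(void)
{
	unsigned int num_blobs = 5, anim_loop = 2, frame = 0, step;

	for (step = 0; step < 10; step++) {
		printf("%u ", frame);	/* render the current frame */
		frame++;		/* step, wrapping to the loop point */
		if (frame >= num_blobs)
			frame = anim_loop;
	}
	printf("\n");	/* prints: 0 1 2 3 4 2 3 4 2 3 */
	return 0;
}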
diff --git a/sys-kernel/pinephone-sources/files/0005-net-usb-qmi_wwan-set-the-DTR-when-resuming.patch b/sys-kernel/pinephone-sources/files/0005-net-usb-qmi_wwan-set-the-DTR-when-resuming.patch
new file mode 100644
index 0000000..6fe7e32
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0005-net-usb-qmi_wwan-set-the-DTR-when-resuming.patch
@@ -0,0 +1,34 @@
+From ed73c96e313c549f710df58c8fbe47200ee13df1 Mon Sep 17 00:00:00 2001
+From: Bhushan Shah
+Date: Sat, 10 Apr 2021 08:52:05 +0530
+Subject: [PATCH 5/5] net: usb: qmi_wwan: set the DTR when resuming
+
+If the USB device goes through reset_resume instead of unbind/bind,
+we need to re-enable the DTR quirk; that way, QMI communication
+between host and modem is possible again after resume.
+
+Signed-off-by: Bhushan Shah
+Tested-by: Dalton Durst
+---
+ drivers/net/usb/qmi_wwan.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index cd6ae9696b56a..ada94a3242146 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -855,6 +855,11 @@ static int qmi_wwan_reset_resume(struct usb_interface *intf)
+ ret = usbnet_resume(intf);
+ if (ret < 0 && callsub)
+ info->subdriver->suspend(intf, PMSG_SUSPEND);
++
++ if (dev->driver_info->data & QMI_WWAN_QUIRK_DTR ||
++ le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
++ qmi_wwan_change_dtr(dev, true);
++ }
+ err:
+ return ret;
+ }
+--
+2.31.1
+
diff --git a/sys-kernel/pinephone-sources/files/0006-bootsplash.patch b/sys-kernel/pinephone-sources/files/0006-bootsplash.patch
new file mode 100644
index 0000000..d6c6db6
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0006-bootsplash.patch
@@ -0,0 +1,82 @@
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 2ebaba16f785..416735ab6dc1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -105,6 +105,7 @@
+ #include
+ #include
+ #include
++#include <linux/bootsplash.h>
+
+ #define MAX_NR_CON_DRIVER 16
+
+@@ -4235,6 +4236,7 @@ void do_unblank_screen(int leaving_gfx)
+ }
+
+ console_blanked = 0;
++ bootsplash_mark_dirty();
+ if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
+ /* Low-level driver cannot restore -> do it ourselves */
+ update_screen(vc);
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+index c8642142cfea..13fcaabbc2ca 100644
+--- a/drivers/video/fbdev/core/bootsplash.c
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -165,6 +165,13 @@ bool bootsplash_would_render_now(void)
+ && bootsplash_is_enabled();
+ }
+
++void bootsplash_mark_dirty(void)
++{
++ mutex_lock(&splash_state.data_lock);
++ splash_state.splash_fb = NULL;
++ mutex_unlock(&splash_state.data_lock);
++}
++
+ bool bootsplash_is_enabled(void)
+ {
+ bool was_enabled;
+@@ -206,9 +213,7 @@ void bootsplash_enable(void)
+
+ if (!was_enabled) {
+ /* Force a full redraw when the splash is re-activated */
+- mutex_lock(&splash_state.data_lock);
+- splash_state.splash_fb = NULL;
+- mutex_unlock(&splash_state.data_lock);
++ bootsplash_mark_dirty();
+
+ schedule_work(&splash_state.work_redraw_vc);
+ }
+@@ -272,9 +277,7 @@ static int splash_resume(struct device *device)
+ * Force full redraw on resume since we've probably lost the
+ * framebuffer's contents meanwhile
+ */
+- mutex_lock(&splash_state.data_lock);
+- splash_state.splash_fb = NULL;
+- mutex_unlock(&splash_state.data_lock);
++ bootsplash_mark_dirty();
+
+ if (bootsplash_would_render_now())
+ schedule_work(&splash_state.work_redraw_vc);
+diff --git a/include/linux/bootsplash.h b/include/linux/bootsplash.h
+index c6dd0b43180d..4075098aaadd 100644
+--- a/include/linux/bootsplash.h
++++ b/include/linux/bootsplash.h
+@@ -19,6 +19,8 @@ extern void bootsplash_render_full(struct fb_info *info);
+
+ extern bool bootsplash_would_render_now(void);
+
++extern void bootsplash_mark_dirty(void);
++
+ extern bool bootsplash_is_enabled(void);
+ extern void bootsplash_disable(void);
+ extern void bootsplash_enable(void);
+@@ -31,6 +33,8 @@ extern void bootsplash_init(void);
+
+ #define bootsplash_would_render_now() (false)
+
++#define bootsplash_mark_dirty()
++
+ #define bootsplash_is_enabled() (false)
+ #define bootsplash_disable()
+ #define bootsplash_enable()
diff --git a/sys-kernel/pinephone-sources/files/0006-cdc-wdm-send-HUP-if-we-are-resetting.patch b/sys-kernel/pinephone-sources/files/0006-cdc-wdm-send-HUP-if-we-are-resetting.patch
new file mode 100644
index 0000000..072bda0
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0006-cdc-wdm-send-HUP-if-we-are-resetting.patch
@@ -0,0 +1,27 @@
+From 1671ef2de0f3f698622bed7ba0e9a605fdd260fc Mon Sep 17 00:00:00 2001
+From: Bhushan Shah
+Date: Wed, 14 Apr 2021 18:58:41 +0530
+Subject: [PATCH 6/6] cdc-wdm: send HUP if we are resetting
+
+If userspace is polling the cdc-wdm socket and the device resets, we
+should notify the userspace client about the reset.
+---
+ drivers/usb/class/cdc-wdm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 2b9355ed4a2ad..93d9bacc18384 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -666,6 +666,8 @@ static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+ goto desc_out;
+ }
++ if (test_bit(WDM_RESETTING, &desc->flags))
++ mask = EPOLLHUP;
+ if (test_bit(WDM_READ, &desc->flags))
+ mask = EPOLLIN | EPOLLRDNORM;
+ if (desc->rerr || desc->werr)
+--
+2.31.1
+
diff --git a/sys-kernel/pinephone-sources/files/0007-bootsplash.patch b/sys-kernel/pinephone-sources/files/0007-bootsplash.patch
new file mode 100644
index 0000000..3f82eb0
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0007-bootsplash.patch
@@ -0,0 +1,42 @@
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index f4166263bb3a..a248429194bb 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -49,6 +49,8 @@
+
+ #include
+
++#include <linux/bootsplash.h>
++
+ /*
+ * Exported functions/variables
+ */
+@@ -1413,6 +1415,28 @@ static void kbd_keycode(unsigned int key
+ }
+ #endif
+
++ /* Trap keys when bootsplash is shown */
++ if (bootsplash_would_render_now()) {
++ /* Deactivate bootsplash on ESC or Alt+Fxx VT switch */
++ if (keycode >= KEY_F1 && keycode <= KEY_F12) {
++ bootsplash_disable();
++
++ /*
++ * No return here since we want to actually
++ * perform the VT switch.
++ */
++ } else {
++ if (keycode == KEY_ESC)
++ bootsplash_disable();
++
++ /*
++ * Just drop any other keys.
++ * Their effect would be hidden by the splash.
++ */
++ return;
++ }
++ }
++
+ if (kbd->kbdmode == VC_MEDIUMRAW) {
+ /*
+ * This is extended medium raw mode, with keys above 127
diff --git a/sys-kernel/pinephone-sources/files/0008-bootsplash.patch b/sys-kernel/pinephone-sources/files/0008-bootsplash.patch
new file mode 100644
index 0000000..8a3b715
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0008-bootsplash.patch
@@ -0,0 +1,21 @@
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 3ffc1ce29023..bc6a24c9dfa8 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -49,6 +49,7 @@
+ #include
+ #include
+ #include
++#include <linux/bootsplash.h>
+
+ #include
+ #include
+@@ -104,6 +105,8 @@ static void sysrq_handle_SAK(int key)
+ {
+ struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
+ schedule_work(SAK_work);
++
++ bootsplash_disable();
+ }
+ static struct sysrq_key_op sysrq_SAK_op = {
+ .handler = sysrq_handle_SAK,
diff --git a/sys-kernel/pinephone-sources/files/0009-bootsplash.patch b/sys-kernel/pinephone-sources/files/0009-bootsplash.patch
new file mode 100644
index 0000000..add68e7
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0009-bootsplash.patch
@@ -0,0 +1,21 @@
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 9a39a6fcfe98..8a9c67e1c5d8 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1343,6 +1343,16 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
+ int y;
+ int c = scr_readw((u16 *) vc->vc_pos);
+
++ /*
++ * Disable the splash here so we don't have to hook into
++ * vt_console_print() in drivers/tty/vt/vt.c
++ *
++ * We'd disable the splash just before the call to
++ * hide_cursor() anyway, so this spot is just fine.
++ */
++ if (oops_in_progress)
++ bootsplash_disable();
++
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
+ if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
diff --git a/sys-kernel/pinephone-sources/files/0010-bootsplash.patch b/sys-kernel/pinephone-sources/files/0010-bootsplash.patch
new file mode 100644
index 0000000..e5c1fd0
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0010-bootsplash.patch
@@ -0,0 +1,321 @@
+diff --git a/Documentation/ABI/testing/sysfs-platform-bootsplash b/Documentation/ABI/testing/sysfs-platform-bootsplash
+new file mode 100644
+index 000000000000..742c7b035ded
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-platform-bootsplash
+@@ -0,0 +1,11 @@
++What: /sys/devices/platform/bootsplash.0/enabled
++Date: Oct 2017
++KernelVersion: 4.14
++Contact: Max Staudt
++Description:
++ Can be set and read.
++
++ 0: Splash is disabled.
++ 1: Splash is shown whenever fbcon would show a text console
++ (i.e. no graphical application is running), and a splash
++ file is loaded.
+diff --git a/Documentation/bootsplash.rst b/Documentation/bootsplash.rst
+new file mode 100644
+index 000000000000..611f0c558925
+--- /dev/null
++++ b/Documentation/bootsplash.rst
+@@ -0,0 +1,285 @@
++====================
++The Linux bootsplash
++====================
++
++:Date: November, 2017
++:Author: Max Staudt
++
++
++The Linux bootsplash is a graphical replacement for the '``quiet``' boot
++option, typically showing a logo and a spinner animation as the system starts.
++
++Currently, it is a part of the Framebuffer Console support, and can be found
++as ``CONFIG_BOOTSPLASH`` in the kernel configuration. This means that as long
++as it is enabled, it hijacks fbcon's output and draws a splash screen instead.
++
++Purely compiling in the bootsplash will not render it functional - to actually
++render a splash, you will also need a splash theme file. See the example
++utility and script in ``tools/bootsplash`` for a live demo.
++
++
++
++Motivation
++==========
++
++- The '``quiet``' boot option only suppresses most messages during boot, but
++ errors are still shown.
++
++- A user space implementation can only show a logo once user space has been
++ initialized far enough to allow this. A kernel splash can be shown
++ immediately, as soon as fbcon is available.
++
++- Implementing a splash screen in user space (e.g. Plymouth) is problematic
++ due to resource conflicts.
++
++ For example, if Plymouth is keeping ``/dev/fb0`` (provided via vesafb/efifb)
++ open, then most DRM drivers can't replace it because the address space is
++ still busy - thus leading to a VRAM reservation error.
++
++ See: https://bugzilla.opensuse.org/show_bug.cgi?id=980750
++
++
++
++Command line arguments
++======================
++
++``bootsplash.bootfile``
++ Which file in the initramfs to load.
++
++ The splash theme is loaded via request_firmware(), thus to load
++ ``/lib/firmware/bootsplash/mytheme`` pass the command line:
++
++ ``bootsplash.bootfile=bootsplash/mytheme``
++
++ Note: The splash file *has to be* in the initramfs, as it needs to be
++ available when the splash is initialized early on.
++
++ Default: none, i.e. a non-functional splash, falling back to showing text.
++
++
++
++sysfs run-time configuration
++============================
++
++``/sys/devices/platform/bootsplash.0/enabled``
++ Enable/disable the bootsplash.
++ The system boots with this set to 1, but will not show a splash unless
++ a splash theme file is also loaded.
++
++
++
++Kconfig
++=======
++
++``BOOTSPLASH``
++ Whether to compile in bootsplash support
++ (depends on fbcon compiled in, i.e. ``FRAMEBUFFER_CONSOLE=y``)
++
++
++
++Bootsplash file format
++======================
++
++A file specified in the kernel configuration as ``CONFIG_BOOTSPLASH_FILE``
++or specified on the command line as ``bootsplash.bootfile`` will be loaded
++and displayed as soon as fbcon is initialized.
++
++
++Main blocks
++-----------
++
++There are 3 main blocks in each file:
++
++ - one File header
++ - n Picture headers
++ - m (Blob header + payload) blocks
++
++
++Structures
++----------
++
++The on-disk structures are defined in
++``drivers/video/fbdev/core/bootsplash_file.h`` and represent these blocks:
++
++ - ``struct splash_file_header``
++
++ Represents the file header, with splash-wide information including:
++
++ - The magic string "``Linux bootsplash``" on big-endian platforms
++ (the reverse on little endian)
++ - The file format version (for incompatible updates, hopefully never)
++ - The background color
++ - Number of picture and blob blocks
++ - Animation speed (we only allow one delay for all animations)
++
++ The file header is followed by the first picture header.
++
++
++ - ``struct splash_picture_header``
++
++ Represents an object (picture) drawn on screen, including its immutable
++ properties:
++ - Width, height
++ - Positioning relative to screen corners or in the center
++ - Animation, if any
++ - Animation type
++ - Number of blobs
++
++ The picture header is followed by another picture header, up until n
++ picture headers (as defined in the file header) have been read. Then,
++ the (blob header, payload) pairs follow.
++
++
++ - ``struct splash_blob_header``
++ (followed by payload)
++
++ Represents one raw data stream. So far, only picture data is defined.
++
++ The blob header is followed by a payload, then padding to n*16 bytes,
++ then (if further blobs are defined in the file header) a further blob
++ header.
++
++
++Alignment
++---------
++
++The bootsplash file is designed to be loaded into memory as-is.
++
++All structures are a multiple of 16 bytes long, all elements therein are
++aligned to multiples of their length, and the payloads are always padded
++up to multiples of 16 bytes. This is to allow aligned accesses in all
++cases while still simply mapping the structures over an in-memory copy of
++the bootsplash file.
++
++
++Further information
++-------------------
++
++Please see ``drivers/video/fbdev/core/bootsplash_file.h`` for further
++details and possible values in the file.
++
++
++
++Hooks - how the bootsplash is integrated
++========================================
++
++``drivers/video/fbdev/core/fbcon.c``
++ ``fbcon_init()`` calls ``bootsplash_init()``, which loads the default
++ bootsplash file or the one specified on the kernel command line.
++
++ ``fbcon_switch()`` draws the bootsplash when it's active, and is also
++ one of the callers of ``set_blitting_type()``.
++
++ ``set_blitting_type()`` calls ``fbcon_set_dummyops()`` when the
++ bootsplash is active, overriding the text rendering functions.
++
++ ``fbcon_cursor()`` will call ``bootsplash_disable()`` when an oops is
++ being printed in order to make a kernel panic visible.
++
++``drivers/video/fbdev/core/dummyblit.c``
++ This contains the dummy text rendering functions used to suppress text
++ output while the bootsplash is shown.
++
++``drivers/tty/vt/keyboard.c``
++ ``kbd_keycode()`` can call ``bootsplash_disable()`` when the user
++ presses ESC or F1-F12 (changing VT). This is to provide a built-in way
++ of disabling the splash manually at any time.
++
++
++
++FAQ: Frequently Asked Questions
++===============================
++
++I want to see the log! How do I show the log?
++---------------------------------------------
++
++Press ESC while the splash is shown, or remove the ``bootsplash.bootfile``
++parameter from the kernel cmdline. Without that parameter, the system
++boots with the splash disabled.
++
++
++Why use FB instead of modern DRM/KMS?
++-------------------------------------
++
++This is a semantic problem:
++ - What memory to draw the splash to?
++ - And what mode will the screen be set to?
++
++Using the fbdev emulation solves these issues.
++
++Let's start from a bare KMS system, without fbcon, and without fbdev
++emulation. In this case, as long as userspace doesn't open the KMS
++device, the state of the screen is undefined. No framebuffer is
++allocated in video RAM, and no particular mode is set.
++
++In this case, we'd have to allocate a framebuffer to show the splash,
++and set our mode ourselves. This either wastes a screenful of video RAM
++if the splash is to co-exist with the userspace program's own allocated
++framebuffer, or there is a flicker as we deactivate and delete the
++bootsplash's framebuffer and hand control over to userspace. Since we
++may set a different mode than userspace, we'd also have flicker due
++to mode switching.
++
++This logic is already contained in every KMS driver that performs fbdev
++emulation. So we might as well use that. And the correct API to do so is
++fbdev. Plus, we get compatibility with old, pure fbdev drivers for free.
++With the fbdev emulation, there is *always* a well-defined framebuffer
++to draw on. And the selection of mode has already been done by the
++graphics driver, so we don't need to reinvent that wheel, either.
++Finally, if userspace decides to use /dev/fbX, we don't have to worry
++about wasting video RAM, either.
++
++
++Why is the bootsplash integrated in fbcon?
++------------------------------------------
++
++Right now, the bootsplash is drawn from within fbcon, as this allows us
++to easily know *when* to draw - i.e. when we're safe from fbcon and
++userspace drawing all over our beautiful splash logo.
++
++Separating them is not easy - see the to-do list below.
++
++
++
++TO DO list for future development
++=================================
++
++Second enable/disable switch for the system
++-------------------------------------------
++
++It may be helpful to differentiate between the system and the user
++switching off the bootsplash. Thus, the system may make it disappear and
++reappear e.g. for a password prompt, yet once the user has pressed ESC,
++it could stay gone.
++
++
++Fix buggy DRM/KMS drivers
++-------------------------
++
++Currently, the splash code manually checks for fbdev emulation provided by
++the ast, cirrus, and mgag200 DRM/KMS drivers.
++These drivers use a manual mechanism similar to deferred I/O for their FB
++emulation, and thus need to be manually flushed onto the screen in the same
++way.
++
++This may be improved upon in several ways:
++
++1. Changing these drivers to expose the fbdev BO's memory directly, like
++ bochsdrmfb does.
++2. Creating a new fb_ops->fb_flush() API to allow the kernel to flush the
++ framebuffer once the bootsplash has been drawn into it.
++
++
++Separating from fbcon
++---------------------
++
++Separating these two components would make the bootsplash independent of
++fbcon being compiled into the kernel, thus lowering code size in embedded
++applications.
++
++Doing this cleanly will involve a clean separation of users of an FB device
++within the kernel, i.e. fbcon, bootsplash, and userspace. Right now, the
++legacy fbcon code and VT code co-operate to switch between fbcon and
++userspace (by setting the VT into KD_GRAPHICS mode). Installing a muxer
++between these components entails refactoring old code and checking for
++correct locking.
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 5c237445761e..7ffac272434e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2709,6 +2709,8 @@ BOOTSPLASH
+ M: Max Staudt
+ L: linux-fbdev@vger.kernel.org
+ S: Maintained
++F: Documentation/ABI/testing/sysfs-platform-bootsplash
++F: Documentation/bootsplash.rst
+ F: drivers/video/fbdev/core/bootsplash*.*
+ F: drivers/video/fbdev/core/dummycon.c
+ F: include/linux/bootsplash.h
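
The 16-byte alignment rule described in the documentation above is what lets the kernel overlay the on-disk structures onto an in-memory copy of the theme file. A hedged C sketch of the two recoverable details, the endian-dependent magic and the payload padding; the full field layout lives in bootsplash_file.h and is not reproduced here:

#include <stdint.h>
#include <stdio.h>

/* Round a payload length up to the next multiple of 16 bytes, as the
 * format requires between blobs. */
static size_t splash_pad16(size_t len)
{
	return (len + 15) & ~(size_t)15;
}

int main(void)
{
	/* Reads "Linux bootsplash" on big-endian machines; byte-reversed
	 * on little-endian ones, per the documentation above. */
	char magic[16] = "Linux bootsplash";
	uint16_t probe = 1;
	int i;

	if (*(uint8_t *)&probe == 1) {	/* little-endian host */
		for (i = 0; i < 8; i++) {
			char tmp = magic[i];
			magic[i] = magic[15 - i];
			magic[15 - i] = tmp;
		}
	}

	printf("magic bytes: %.16s\n", magic);
	printf("a 1000-byte payload occupies %zu bytes on disk\n",
	       splash_pad16(1000));
	return 0;
}
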
diff --git a/sys-kernel/pinephone-sources/files/0010-dts-pinephone-Add-pine64-pinephone-to-compat-list.patch b/sys-kernel/pinephone-sources/files/0010-dts-pinephone-Add-pine64-pinephone-to-compat-list.patch
new file mode 100644
index 0000000..156e837
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0010-dts-pinephone-Add-pine64-pinephone-to-compat-list.patch
@@ -0,0 +1,56 @@
+From 05044b9e4e4ae03f66e1c504d6fef57a1d135897 Mon Sep 17 00:00:00 2001
+From: Dylan Van Assche
+Date: Thu, 24 Dec 2020 19:57:12 +0100
+Subject: [PATCH] dts: pinephone: Add 'pine64,pinephone' to compat list
+
+Indicates that all PinePhone models share most of their hardware with each
+other. Used by feedbackd when retrieving a device-specific configuration for
+haptic feedback.
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts | 2 +-
+ arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts | 2 +-
+ arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
+index 0f6faa44c..2e0892b32 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
+@@ -7,7 +7,7 @@
+
+ / {
+ model = "Pine64 PinePhone Developer Batch (1.0)";
+- compatible = "pine64,pinephone-1.0", "allwinner,sun50i-a64";
++ compatible = "pine64,pinephone-1.0", "pine64,pinephone", "allwinner,sun50i-a64";
+
+ reg_vbus: usb0-vbus {
+ compatible = "regulator-fixed";
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+index 95a880fdc..d6bad0838 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+@@ -7,7 +7,7 @@
+
+ / {
+ model = "Pine64 PinePhone Braveheart (1.1)";
+- compatible = "pine64,pinephone-1.1", "allwinner,sun50i-a64";
++ compatible = "pine64,pinephone-1.1", "pine64,pinephone", "allwinner,sun50i-a64";
+
+ reg_vbus: usb0-vbus {
+ compatible = "regulator-fixed";
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+index 23ba72508..710493186 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+@@ -7,7 +7,7 @@
+
+ / {
+ model = "Pine64 PinePhone (1.2)";
+- compatible = "pine64,pinephone-1.2", "allwinner,sun50i-a64";
++ compatible = "pine64,pinephone-1.2", "pine64,pinephone", "allwinner,sun50i-a64";
+
+ wifi_pwrseq: wifi-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+--
+2.26.2
+
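
Per the commit message above, userspace such as feedbackd keys its configuration off this compatible string. A minimal sketch of how a daemon could match it by walking the NUL-separated list the kernel exposes; the helper name is illustrative:

#include <stdio.h>
#include <string.h>

/* Return 1 if the running machine's device tree lists `want`. */
static int machine_is_compatible(const char *want)
{
	char buf[256];
	size_t len, off;
	FILE *f = fopen("/proc/device-tree/compatible", "rb");

	if (!f)
		return 0;
	len = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[len] = '\0';

	/* The property is a list of NUL-separated strings. */
	for (off = 0; off < len; off += strlen(buf + off) + 1)
		if (strcmp(buf + off, want) == 0)
			return 1;
	return 0;
}

int main(void)
{
	printf("pinephone: %d\n", machine_is_compatible("pine64,pinephone"));
	return 0;
}
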
diff --git a/sys-kernel/pinephone-sources/files/0011-bootsplash.patch b/sys-kernel/pinephone-sources/files/0011-bootsplash.patch
new file mode 100644
index 0000000..8e87eb4
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0011-bootsplash.patch
@@ -0,0 +1,129 @@
+diff --git a/Documentation/ABI/testing/sysfs-platform-bootsplash b/Documentation/ABI/testing/sysfs-platform-bootsplash
+index 742c7b035ded..f8f4b259220e 100644
+--- a/Documentation/ABI/testing/sysfs-platform-bootsplash
++++ b/Documentation/ABI/testing/sysfs-platform-bootsplash
+@@ -9,3 +9,35 @@ Description:
+ 1: Splash is shown whenever fbcon would show a text console
+ (i.e. no graphical application is running), and a splash
+ file is loaded.
++
++What: /sys/devices/platform/bootsplash.0/drop_splash
++Date: Oct 2017
++KernelVersion: 4.14
++Contact: Max Staudt
++Description:
++ Can only be set.
++
++ Any value written will cause the current splash theme file
++ to be unloaded and the text console to be redrawn.
++
++What: /sys/devices/platform/bootsplash.0/load_file
++Date: Oct 2017
++KernelVersion: 4.14
++Contact: Max Staudt
++Description:
++ Can only be set.
++
++ Any value written will cause the splash to be disabled and
++ internal memory structures to be freed.
++
++ A firmware path written will cause a new theme file to be
++ loaded and the current bootsplash to be replaced.
++ The current enabled/disabled status is not touched.
++ If the splash is already active, it will be redrawn.
++
++ The path has to be a path in /lib/firmware since
++ request_firmware() is used to fetch the data.
++
++ When setting the splash from the shell, echo -n has to be
++ used as any trailing '\n' newline will be interpreted as
++ part of the path.
+diff --git a/Documentation/bootsplash.rst b/Documentation/bootsplash.rst
+index 611f0c558925..b35aba5093e8 100644
+--- a/Documentation/bootsplash.rst
++++ b/Documentation/bootsplash.rst
+@@ -67,6 +67,14 @@ sysfs run-time configuration
+ a splash theme file is also loaded.
+
+
++``/sys/devices/platform/bootsplash.0/drop_splash``
++ Unload splash data and free memory.
++
++``/sys/devices/platform/bootsplash.0/load_file``
++ Load a splash file from ``/lib/firmware/``.
++ Note that trailing newlines will be interpreted as part of the file name.
++
++
+
+ Kconfig
+ =======
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+index 13fcaabbc2ca..16cb0493629d 100644
+--- a/drivers/video/fbdev/core/bootsplash.c
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -251,11 +251,65 @@ static ssize_t splash_store_enabled(struct device *device,
+ return count;
+ }
+
++static ssize_t splash_store_drop_splash(struct device *device,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct splash_file_priv *fp;
++
++ if (!buf || !count || !splash_state.file)
++ return count;
++
++ mutex_lock(&splash_state.data_lock);
++ fp = splash_state.file;
++ splash_state.file = NULL;
++ mutex_unlock(&splash_state.data_lock);
++
++ /* Redraw the text console */
++ schedule_work(&splash_state.work_redraw_vc);
++
++ bootsplash_free_file(fp);
++
++ return count;
++}
++
++static ssize_t splash_store_load_file(struct device *device,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct splash_file_priv *fp, *fp_old;
++
++ if (!count)
++ return 0;
++
++ fp = bootsplash_load_firmware(&splash_state.splash_device->dev,
++ buf);
++
++ if (!fp)
++ return -ENXIO;
++
++ mutex_lock(&splash_state.data_lock);
++ fp_old = splash_state.file;
++ splash_state.splash_fb = NULL;
++ splash_state.file = fp;
++ mutex_unlock(&splash_state.data_lock);
++
++ /* Update the splash or text console */
++ schedule_work(&splash_state.work_redraw_vc);
++
++ bootsplash_free_file(fp_old);
++ return count;
++}
++
+ static DEVICE_ATTR(enabled, 0644, splash_show_enabled, splash_store_enabled);
++static DEVICE_ATTR(drop_splash, 0200, NULL, splash_store_drop_splash);
++static DEVICE_ATTR(load_file, 0200, NULL, splash_store_load_file);
+
+
+ static struct attribute *splash_dev_attrs[] = {
+ &dev_attr_enabled.attr,
++ &dev_attr_drop_splash.attr,
++ &dev_attr_load_file.attr,
+ NULL
+ };
+
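
Putting the two new attributes together: a theme can be swapped at run time by writing a bare firmware-relative path to load_file. A small C sketch that avoids the trailing-newline pitfall the ABI document warns about; the theme name bootsplash/mytheme is just an example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/devices/platform/bootsplash.0/load_file";
	const char *theme = "bootsplash/mytheme"; /* relative to /lib/firmware */
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Write the bare path: a trailing '\n' would become part of it. */
	if (write(fd, theme, strlen(theme)) < 0)
		perror("write");
	close(fd);
	return 0;
}
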
diff --git a/sys-kernel/pinephone-sources/files/0012-pinephone-fix-pogopin-i2c.patch b/sys-kernel/pinephone-sources/files/0012-pinephone-fix-pogopin-i2c.patch
new file mode 100644
index 0000000..86a260b
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0012-pinephone-fix-pogopin-i2c.patch
@@ -0,0 +1,29 @@
+From d753557c64f6e85f63cffab53496d6271d724074 Mon Sep 17 00:00:00 2001
+From: Martijn Braam
+Date: Mon, 15 Feb 2021 13:10:37 -0800
+Subject: [PATCH] pinephone: fix pogopin i2c
+
+---
+ arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+index 39fdf96fe95d..7d0dd52e2f9d 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+@@ -635,11 +635,7 @@ &pio {
+ vcc-pb-supply = <®_dcdc1>;
+ vcc-pc-supply = <®_dcdc1>;
+ vcc-pd-supply = <®_dcdc1>;
+- /* pinctrl would enable this even if no camera is powered,
+- * which is wrong/not necessary
+- *
+- * vcc-pe-supply = <®_aldo1>; (also used by pogo pins i2c)
+- */
++ vcc-pe-supply = <®_aldo1>; /* (also used by pogo pins i2c) */
+ vcc-pf-supply = <®_dcdc1>;
+ vcc-pg-supply = <®_dldo4>;
+ vcc-ph-supply = <®_dcdc1>;
+--
+2.30.1
+
diff --git a/sys-kernel/pinephone-sources/files/0178-sun8i-codec-fix-headphone-jack-pin-name.patch b/sys-kernel/pinephone-sources/files/0178-sun8i-codec-fix-headphone-jack-pin-name.patch
new file mode 100644
index 0000000..3c27680
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0178-sun8i-codec-fix-headphone-jack-pin-name.patch
@@ -0,0 +1,25 @@
+From 2253c0d31cf17debb97db418bec21ad59cd47c14 Mon Sep 17 00:00:00 2001
+From: Arnaud Ferraris
+Date: Tue, 3 Nov 2020 17:04:35 +0100
+Subject: [PATCH 178/183] sun8i-codec: fix headphone jack pin name
+
+---
+ sound/soc/sunxi/sun8i-codec.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
+index 6128d861df90..ab751fd7c426 100644
+--- a/sound/soc/sunxi/sun8i-codec.c
++++ b/sound/soc/sunxi/sun8i-codec.c
+@@ -1272,7 +1272,7 @@ static const struct snd_soc_dapm_route sun8i_codec_legacy_routes[] = {
+
+ static struct snd_soc_jack_pin sun8i_codec_jack_pins[] = {
+ {
+- .pin = "Headphone Jack",
++ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+--
+2.30.0
+
diff --git a/sys-kernel/pinephone-sources/files/0179-arm64-dts-allwinner-pinephone-improve-device-tree-5.12.patch b/sys-kernel/pinephone-sources/files/0179-arm64-dts-allwinner-pinephone-improve-device-tree-5.12.patch
new file mode 100644
index 0000000..18065b2
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0179-arm64-dts-allwinner-pinephone-improve-device-tree-5.12.patch
@@ -0,0 +1,130 @@
+From 465a75a727ae5eb4c94859bfac4742cb14e38b3e Mon Sep 17 00:00:00 2001
+From: Arnaud Ferraris
+Date: Fri, 3 Apr 2020 17:13:55 +0200
+Subject: [PATCH 179/183] arm64: dts: allwinner: pinephone: improve device tree
+
+On PinePhone, the headset mic bias resistor isn't populated on the
+schematics (R811), therefore we need to enable the codec's internal
+resistor. Additionally, the jack detection IRQs are inverted due to the
+connector wiring, so the necessary property is added to the codec node
+to make the driver aware of this fact.
+
+We also stop LEDs during suspend to improve battery life, lower
+cpu_alert* temperatures so the phone doesn't get too hot and improve the
+backlight brightness values so we have a wider usable range.
+
+Finally, the RGB LED max_brightness is set to 1 as it isn't using a PWM
+output.
+---
+ .../dts/allwinner/sun50i-a64-pinephone-1.1.dts | 10 +++++-----
+ .../dts/allwinner/sun50i-a64-pinephone-1.2.dts | 13 ++++++-------
+ .../dts/allwinner/sun50i-a64-pinephone.dtsi | 18 +++++++++++++-----
+ 3 files changed, 24 insertions(+), 17 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+index f084c4f21f12..573f1929da4f 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+@@ -29,11 +29,11 @@ &backlight {
+ * value here was chosen as a safe default.
+ */
+ brightness-levels = <
+- 774 793 814 842
+- 882 935 1003 1088
+- 1192 1316 1462 1633
+- 1830 2054 2309 2596
+- 2916 3271 3664 4096>;
++ 392 413 436 468
++ 512 571 647 742
++ 857 995 1159 1349
++ 1568 1819 2103 2423
++ 2779 3176 3614 4096>;
+ num-interpolated-steps = <50>;
+ default-brightness-level = <400>;
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+index bbf64677c22b..6c3922543fec 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+@@ -34,14 +34,13 @@ &backlight {
+ * chosen as a safe default.
+ */
+ brightness-levels = <
+- 5000 5248 5506 5858 6345
+- 6987 7805 8823 10062 11543
+- 13287 15317 17654 20319 23336
+- 26724 30505 34702 39335 44427
+- 50000
+- >;
++ 392 413 436 468
++ 512 571 647 742
++ 857 995 1159 1349
++ 1568 1819 2103 2423
++ 2779 3176 3614 4096>;
+ num-interpolated-steps = <50>;
+- default-brightness-level = <500>;
++ default-brightness-level = <400>;
+ };
+
+ &lis3mdl {
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+index c55709197804..441358592072 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+@@ -219,21 +219,21 @@
+ function = LED_FUNCTION_INDICATOR;
+ color = ;
+ gpios = <&pio 3 20 GPIO_ACTIVE_HIGH>; /* PD20 */
+- retain-state-suspended;
++ max-brightness = <1>;
+ };
+
+ led-1 {
+ function = LED_FUNCTION_INDICATOR;
+ color = ;
+ gpios = <&pio 3 18 GPIO_ACTIVE_HIGH>; /* PD18 */
+- retain-state-suspended;
++ max-brightness = <1>;
+ };
+
+ led-2 {
+ function = LED_FUNCTION_INDICATOR;
+ color = ;
+ gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */
+- retain-state-suspended;
++ max-brightness = <1>;
+ };
+ };
+
+@@ -380,6 +380,14 @@
+ cpu-supply = <®_dcdc2>;
+ };
+
++&cpu_alert0 {
++ temperature = <60000>;
++};
++
++&cpu_alert1 {
++ temperature = <80000>;
++};
++
+ &csi {
+ pinctrl-0 = <&csi_pins>, <&csi_mclk_pin>;
+ status = "okay";
+@@ -816,11 +824,11 @@
+ simple-audio-card,aux-devs = <&codec_analog>, <&speaker_amp>;
+ simple-audio-card,widgets = "Microphone", "Headset Microphone",
+ "Microphone", "Internal Microphone",
+- "Headphone", "Headphone Jack",
++ "Headphone", "Headphone",
+ "Speaker", "Internal Earpiece",
+ "Speaker", "Internal Speaker";
+ simple-audio-card,routing =
+- "Headphone Jack", "HP",
++ "Headphone", "HP",
+ "Internal Earpiece", "EARPIECE",
+ "Internal Speaker", "Speaker Amp OUTL",
+ "Internal Speaker", "Speaker Amp OUTR",
+--
+2.30.0
+
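
For reference, the new brightness-levels table above spans 392..4096 and is expanded by the pwm-backlight driver, which linearly interpolates num-interpolated-steps values between each pair of listed levels. A simplified C sketch of that expansion (the kernel's actual logic is in drivers/video/backlight/pwm_bl.c):

#include <stdio.h>

int main(void)
{
	/* First few entries of the new table above (PWM duty, max 4096). */
	unsigned int levels[] = { 392, 413, 436, 468, 512 };
	unsigned int steps = 50;	/* num-interpolated-steps */
	size_t n = sizeof(levels) / sizeof(levels[0]);
	size_t i;
	unsigned int s;

	for (i = 0; i + 1 < n; i++)
		for (s = 0; s < steps; s++)
			printf("%u\n", levels[i] +
			       (levels[i + 1] - levels[i]) * s / steps);
	printf("%u\n", levels[n - 1]);
	return 0;
}

The lower starting point (392 instead of 774 or 5000) is what gives the wider usable dim range the commit message mentions.
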
diff --git a/sys-kernel/pinephone-sources/files/0179-arm64-dts-allwinner-pinephone-improve-device-tree.patch b/sys-kernel/pinephone-sources/files/0179-arm64-dts-allwinner-pinephone-improve-device-tree.patch
new file mode 100644
index 0000000..35a52ef
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/0179-arm64-dts-allwinner-pinephone-improve-device-tree.patch
@@ -0,0 +1,116 @@
+From 465a75a727ae5eb4c94859bfac4742cb14e38b3e Mon Sep 17 00:00:00 2001
+From: Arnaud Ferraris
+Date: Fri, 3 Apr 2020 17:13:55 +0200
+Subject: [PATCH 179/183] arm64: dts: allwinner: pinephone: improve device tree
+
+On PinePhone, the headset mic bias resistor isn't populated on the
+schematics (R811), therefore we need to enable the codec's internal
+resistor. Additionally, the jack detection IRQs are inverted due to the
+connector wiring, so the necessary property is added to the codec node
+to make the driver aware of this fact.
+
+We also stop LEDs during suspend to improve battery life, lower
+cpu_alert* temperatures so the phone doesn't get too hot and improve the
+backlight brightness values so we have a wider usable range.
+
+Finally, the RGB LED max_brightness is set to 1 as it isn't using a PWM
+output.
+---
+ .../dts/allwinner/sun50i-a64-pinephone-1.1.dts | 10 +++++-----
+ .../dts/allwinner/sun50i-a64-pinephone-1.2.dts | 13 ++++++-------
+ .../dts/allwinner/sun50i-a64-pinephone.dtsi | 18 +++++++++++++-----
+ 3 files changed, 24 insertions(+), 17 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+index f084c4f21f12..573f1929da4f 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+@@ -29,11 +29,11 @@ &backlight {
+ * value here was chosen as a safe default.
+ */
+ brightness-levels = <
+- 774 793 814 842
+- 882 935 1003 1088
+- 1192 1316 1462 1633
+- 1830 2054 2309 2596
+- 2916 3271 3664 4096>;
++ 392 413 436 468
++ 512 571 647 742
++ 857 995 1159 1349
++ 1568 1819 2103 2423
++ 2779 3176 3614 4096>;
+ num-interpolated-steps = <50>;
+ default-brightness-level = <400>;
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+index bbf64677c22b..6c3922543fec 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+@@ -34,14 +34,13 @@ &backlight {
+ * chosen as a safe default.
+ */
+ brightness-levels = <
+- 5000 5248 5506 5858 6345
+- 6987 7805 8823 10062 11543
+- 13287 15317 17654 20319 23336
+- 26724 30505 34702 39335 44427
+- 50000
+- >;
++ 392 413 436 468
++ 512 571 647 742
++ 857 995 1159 1349
++ 1568 1819 2103 2423
++ 2779 3176 3614 4096>;
+ num-interpolated-steps = <50>;
+- default-brightness-level = <500>;
++ default-brightness-level = <400>;
+ };
+
+ &lis3mdl {
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+index c55709197804..441358592072 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+@@ -219,21 +219,21 @@
+ function = LED_FUNCTION_INDICATOR;
+ color = ;
+ gpios = <&pio 3 20 GPIO_ACTIVE_HIGH>; /* PD20 */
+- retain-state-suspended;
++ max-brightness = <1>;
+ };
+
+ led-1 {
+ function = LED_FUNCTION_INDICATOR;
+ color = ;
+ gpios = <&pio 3 18 GPIO_ACTIVE_HIGH>; /* PD18 */
+- retain-state-suspended;
++ max-brightness = <1>;
+ };
+
+ led-2 {
+ function = LED_FUNCTION_INDICATOR;
+ color = ;
+ gpios = <&pio 3 19 GPIO_ACTIVE_HIGH>; /* PD19 */
+- retain-state-suspended;
++ max-brightness = <1>;
+ };
+ };
+
+@@ -380,6 +380,14 @@
+ cpu-supply = <®_dcdc2>;
+ };
+
++&cpu_alert0 {
++ temperature = <60000>;
++};
++
++&cpu_alert1 {
++ temperature = <80000>;
++};
++
+ &csi {
+ pinctrl-0 = <&csi_pins>, <&csi_mclk_pin>;
+ status = "okay";
+--
+2.30.0
+
diff --git a/sys-kernel/pinephone-sources/files/5.11.3.patch b/sys-kernel/pinephone-sources/files/5.11.3.patch
new file mode 100644
index 0000000..116d4bc
--- /dev/null
+++ b/sys-kernel/pinephone-sources/files/5.11.3.patch
@@ -0,0 +1,31655 @@
+diff --git a/Documentation/admin-guide/perf/arm-cmn.rst b/Documentation/admin-guide/perf/arm-cmn.rst
+index 0e48093460140..796e25b7027b2 100644
+--- a/Documentation/admin-guide/perf/arm-cmn.rst
++++ b/Documentation/admin-guide/perf/arm-cmn.rst
+@@ -17,7 +17,7 @@ PMU events
+ ----------
+
+ The PMU driver registers a single PMU device for the whole interconnect,
+-see /sys/bus/event_source/devices/arm_cmn. Multi-chip systems may link
++see /sys/bus/event_source/devices/arm_cmn_0. Multi-chip systems may link
+ more than one CMN together via external CCIX links - in this situation,
+ each mesh counts its own events entirely independently, and additional
+ PMU devices will be named arm_cmn_{1..n}.
+diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
+index e35a3f2fb006a..586cd4b864284 100644
+--- a/Documentation/admin-guide/sysctl/vm.rst
++++ b/Documentation/admin-guide/sysctl/vm.rst
+@@ -983,11 +983,11 @@ that benefit from having their data cached, zone_reclaim_mode should be
+ left disabled as the caching effect is likely to be more important than
+ data locality.
+
+-zone_reclaim may be enabled if it's known that the workload is partitioned
+-such that each partition fits within a NUMA node and that accessing remote
+-memory would cause a measurable performance reduction. The page allocator
+-will then reclaim easily reusable pages (those page cache pages that are
+-currently not used) before allocating off node pages.
++Consider enabling one or more zone_reclaim mode bits if it's known that the
++workload is partitioned such that each partition fits within a NUMA node
++and that accessing remote memory would cause a measurable performance
++reduction. The page allocator will take additional actions before
++allocating off node pages.
+
+ Allowing zone reclaim to write out pages stops processes that are
+ writing large amounts of data from dirtying pages on other nodes. Zone
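
The reworded guidance above refers to the individual mode bits of vm.zone_reclaim_mode: 1 enables zone reclaim, 2 additionally writes out dirty pages, and 4 allows swapping. A trivial C sketch of setting it at run time, equivalent to `sysctl vm.zone_reclaim_mode=1`:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/zone_reclaim_mode", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* enable plain zone reclaim only */
	fclose(f);
	return 0;
}
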
+diff --git a/Documentation/filesystems/seq_file.rst b/Documentation/filesystems/seq_file.rst
+index 56856481dc8d8..a6726082a7c25 100644
+--- a/Documentation/filesystems/seq_file.rst
++++ b/Documentation/filesystems/seq_file.rst
+@@ -217,6 +217,12 @@ between the calls to start() and stop(), so holding a lock during that time
+ is a reasonable thing to do. The seq_file code will also avoid taking any
+ other locks while the iterator is active.
+
++The iterator value returned by start() or next() is guaranteed to be
++passed to a subsequent next() or stop() call. This allows resources
++such as locks that were taken to be reliably released. There is *no*
++guarantee that the iterator will be passed to show(), though in practice
++it often will be.
++
+
+ Formatted output
+ ================
+diff --git a/Documentation/scsi/libsas.rst b/Documentation/scsi/libsas.rst
+index 7216b5d258001..de422253b0ab7 100644
+--- a/Documentation/scsi/libsas.rst
++++ b/Documentation/scsi/libsas.rst
+@@ -190,12 +190,10 @@ The event interface::
+
+ /* LLDD calls these to notify the class of an event. */
+ void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
+- void (*notify_port_event)(struct sas_phy *, enum port_event);
+- void (*notify_phy_event)(struct sas_phy *, enum phy_event);
+-
+-When sas_register_ha() returns, those are set and can be
+-called by the LLDD to notify the SAS layer of such events
+-the SAS layer.
++ void sas_notify_port_event(struct sas_phy *, enum port_event);
++ void sas_notify_phy_event(struct sas_phy *, enum phy_event);
++ void sas_notify_port_event_gfp(struct sas_phy *, enum port_event, gfp_t);
++ void sas_notify_phy_event_gfp(struct sas_phy *, enum phy_event, gfp_t);
+
+ The port notification::
+
+diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
+index aa0081685ee11..b3ed5c581034c 100644
+--- a/Documentation/security/keys/core.rst
++++ b/Documentation/security/keys/core.rst
+@@ -1040,8 +1040,8 @@ The keyctl syscall functions are:
+
+ "key" is the ID of the key to be watched.
+
+- "queue_fd" is a file descriptor referring to an open "/dev/watch_queue"
+- which manages the buffer into which notifications will be delivered.
++ "queue_fd" is a file descriptor referring to an open pipe which
++ manages the buffer into which notifications will be delivered.
+
+ "filter" is either NULL to remove a watch or a filter specification to
+ indicate what events are required from the key.
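
Since the text now says the queue is an ordinary pipe rather than /dev/watch_queue, the setup follows the watch_queue(7) pattern. A hedged sketch, assuming libkeyutils and a kernel with CONFIG_KEY_NOTIFICATIONS; error handling elided:

#define _GNU_SOURCE		/* for pipe2() */
#include <fcntl.h>
#include <keyutils.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watch_queue.h>

int main(void)
{
	int fds[2];

	pipe2(fds, O_NOTIFICATION_PIPE);		/* the "open pipe" */
	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);	/* buffer slots    */

	/* Watch the session keyring; 0x01 is an arbitrary watch ID. */
	keyctl(KEYCTL_WATCH_KEY, KEY_SPEC_SESSION_KEYRING, fds[0], 0x01);

	/* Key change notifications can now be read() from fds[0]. */
	return 0;
}
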
+diff --git a/Makefile b/Makefile
+index 617be9fd59ce5..a8c1162de3a0b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 11
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = 💕 Valentine's Day Edition 💕
+
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index d9cce7238a365..73eee41826e2d 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -1164,9 +1164,9 @@ __armv4_mmu_cache_off:
+ __armv7_mmu_cache_off:
+ mrc p15, 0, r0, c1, c0
+ #ifdef CONFIG_MMU
+- bic r0, r0, #0x000d
++ bic r0, r0, #0x0005
+ #else
+- bic r0, r0, #0x000c
++ bic r0, r0, #0x0004
+ #endif
+ mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
+ mov r0, #0
+diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
+index b3728de3bd3fa..ec134e22bae3e 100644
+--- a/arch/arm/boot/dts/armada-388-helios4.dts
++++ b/arch/arm/boot/dts/armada-388-helios4.dts
+@@ -70,6 +70,9 @@
+
+ system-leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_system_led_pins>;
++
+ status-led {
+ label = "helios4:green:status";
+ gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
+@@ -86,6 +89,9 @@
+
+ io-leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_io_led_pins>;
++
+ sata1-led {
+ label = "helios4:green:ata1";
+ gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+@@ -121,11 +127,15 @@
+ fan1: j10-pwm {
+ compatible = "pwm-fan";
+ pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_fan1_pins>;
+ };
+
+ fan2: j17-pwm {
+ compatible = "pwm-fan";
+ pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_fan2_pins>;
+ };
+
+ usb2_phy: usb2-phy {
+@@ -286,16 +296,22 @@
+ "mpp39", "mpp40";
+ marvell,function = "sd0";
+ };
+- helios_led_pins: helios-led-pins {
+- marvell,pins = "mpp24", "mpp25",
+- "mpp49", "mpp50",
++ helios_system_led_pins: helios-system-led-pins {
++ marvell,pins = "mpp24", "mpp25";
++ marvell,function = "gpio";
++ };
++ helios_io_led_pins: helios-io-led-pins {
++ marvell,pins = "mpp49", "mpp50",
+ "mpp52", "mpp53",
+ "mpp54";
+ marvell,function = "gpio";
+ };
+- helios_fan_pins: helios-fan-pins {
+- marvell,pins = "mpp41", "mpp43",
+- "mpp48", "mpp55";
++ helios_fan1_pins: helios_fan1_pins {
++ marvell,pins = "mpp41", "mpp43";
++ marvell,function = "gpio";
++ };
++ helios_fan2_pins: helios_fan2_pins {
++ marvell,pins = "mpp48", "mpp55";
+ marvell,function = "gpio";
+ };
+ microsom_spi1_cs_pins: spi1-cs-pins {
+diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
+index b3dafbc8cacac..e7a45ba18fc9c 100644
+--- a/arch/arm/boot/dts/aspeed-g4.dtsi
++++ b/arch/arm/boot/dts/aspeed-g4.dtsi
+@@ -375,6 +375,7 @@
+ compatible = "aspeed,ast2400-lpc-snoop";
+ reg = <0x10 0x8>;
+ interrupts = <8>;
++ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
+index 5bc0de0f33653..21930521a986a 100644
+--- a/arch/arm/boot/dts/aspeed-g5.dtsi
++++ b/arch/arm/boot/dts/aspeed-g5.dtsi
+@@ -497,6 +497,7 @@
+ compatible = "aspeed,ast2500-lpc-snoop";
+ reg = <0x10 0x8>;
+ interrupts = <8>;
++ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
+index 810b0676ab033..3ee470c2b7b56 100644
+--- a/arch/arm/boot/dts/aspeed-g6.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6.dtsi
+@@ -524,6 +524,7 @@
+ compatible = "aspeed,ast2600-lpc-snoop";
+ reg = <0x0 0x80>;
+ interrupts = ;
++ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
+index 04290ec4583a6..829c05b2c405f 100644
+--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
++++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
+@@ -79,7 +79,7 @@
+ pmic@66 {
+ compatible = "samsung,s2mps14-pmic";
+ interrupt-parent = <&gpx3>;
+- interrupts = <5 IRQ_TYPE_NONE>;
++ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s2mps14_irq>;
+ reg = <0x66>;
+diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
+index 69451566945dc..fae046e08a5dd 100644
+--- a/arch/arm/boot/dts/exynos3250-monk.dts
++++ b/arch/arm/boot/dts/exynos3250-monk.dts
+@@ -200,7 +200,7 @@
+ pmic@66 {
+ compatible = "samsung,s2mps14-pmic";
+ interrupt-parent = <&gpx0>;
+- interrupts = <7 IRQ_TYPE_NONE>;
++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x66>;
+ wakeup-source;
+
+diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
+index a26e3e582a7e7..d64ccf4b7d324 100644
+--- a/arch/arm/boot/dts/exynos3250-rinato.dts
++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
+@@ -270,7 +270,7 @@
+ pmic@66 {
+ compatible = "samsung,s2mps14-pmic";
+ interrupt-parent = <&gpx0>;
+- interrupts = <7 IRQ_TYPE_NONE>;
++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x66>;
+ wakeup-source;
+
+diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
+index 9d2baea62d0d7..fba1462b19dfd 100644
+--- a/arch/arm/boot/dts/exynos5250-spring.dts
++++ b/arch/arm/boot/dts/exynos5250-spring.dts
+@@ -109,7 +109,7 @@
+ compatible = "samsung,s5m8767-pmic";
+ reg = <0x66>;
+ interrupt-parent = <&gpx3>;
+- interrupts = <2 IRQ_TYPE_NONE>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
+ wakeup-source;
+diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+index bf457d0c02ebd..1aad4859c5f14 100644
+--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+@@ -349,7 +349,7 @@
+ reg = <0x66>;
+
+ interrupt-parent = <&gpx3>;
+- interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s2mps11_irq>;
+
+diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+index d0df560eb0db1..6d690b1db0994 100644
+--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+@@ -509,7 +509,7 @@
+ samsung,s2mps11-acokb-ground;
+
+ interrupt-parent = <&gpx0>;
+- interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s2mps11_irq>;
+
+diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
+index cb309743de5da..dd8ef58cbaed4 100644
+--- a/arch/arm/boot/dts/omap443x.dtsi
++++ b/arch/arm/boot/dts/omap443x.dtsi
+@@ -33,10 +33,12 @@
+ };
+
+ ocp {
++ /* 4430 has only gpio_86 tshut and no talert interrupt */
+ bandgap: bandgap@4a002260 {
+ reg = <0x4a002260 0x4
+ 0x4a00232C 0x4>;
+ compatible = "ti,omap4430-bandgap";
++ gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+
+ #thermal-sensor-cells = <0>;
+ };
+diff --git a/arch/arm/boot/dts/tegra30-ouya.dts b/arch/arm/boot/dts/tegra30-ouya.dts
+index 74da1360d297c..0368b3b816ef2 100644
+--- a/arch/arm/boot/dts/tegra30-ouya.dts
++++ b/arch/arm/boot/dts/tegra30-ouya.dts
+@@ -4352,8 +4352,8 @@
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi_alt3";
+ nvidia,pull = ;
+- nvidia,tristate = ;
+- nvidia,enable-input = ;
++ nvidia,tristate = ;
++ nvidia,enable-input = ;
+ };
+ pcc1 {
+ nvidia,pins = "pcc1";
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index 0203e545bbc8d..075a2e0ed2c15 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -248,6 +248,7 @@ struct oabi_epoll_event {
+ __u64 data;
+ } __attribute__ ((packed,aligned(4)));
+
++#ifdef CONFIG_EPOLL
+ asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event)
+ {
+@@ -298,6 +299,20 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ kfree(kbuf);
+ return err ? -EFAULT : ret;
+ }
++#else
++asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
++ struct oabi_epoll_event __user *event)
++{
++ return -EINVAL;
++}
++
++asmlinkage long sys_oabi_epoll_wait(int epfd,
++ struct oabi_epoll_event __user *events,
++ int maxevents, int timeout)
++{
++ return -EINVAL;
++}
++#endif
+
+ struct oabi_sembuf {
+ unsigned short sem_num;
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index 0184de05c1be1..b683c2caa40b9 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -442,7 +442,7 @@ ENDPROC(at91_backup_mode)
+ str tmp1, [pmc, #AT91_PMC_PLL_UPDT]
+
+ /* step 2. */
+- ldr tmp1, =#AT91_PMC_PLL_ACR_DEFAULT_PLLA
++ ldr tmp1, =AT91_PMC_PLL_ACR_DEFAULT_PLLA
+ str tmp1, [pmc, #AT91_PMC_PLL_ACR]
+
+ /* step 3. */
+diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
+index f7211b57b1e78..165c184801e19 100644
+--- a/arch/arm/mach-ixp4xx/Kconfig
++++ b/arch/arm/mach-ixp4xx/Kconfig
+@@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
+ select I2C
+ select I2C_IOP3XX
+ select PCI
+- select TIMER_OF
+ select USE_OF
+ help
+ Say 'Y' here to support Device Tree-based IXP4xx platforms.
+diff --git a/arch/arm/mach-s3c/irq-s3c24xx-fiq.S b/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
+index b54cbd0122413..5d238d9a798e1 100644
+--- a/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
++++ b/arch/arm/mach-s3c/irq-s3c24xx-fiq.S
+@@ -35,7 +35,6 @@
+ @ and an offset to the irq acknowledgment word
+
+ ENTRY(s3c24xx_spi_fiq_rx)
+-s3c24xx_spi_fix_rx:
+ .word fiq_rx_end - fiq_rx_start
+ .word fiq_rx_irq_ack - fiq_rx_start
+ fiq_rx_start:
+@@ -49,7 +48,7 @@ fiq_rx_start:
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+- subnes pc, lr, #4 @@ return, still have work to do
++ subsne pc, lr, #4 @@ return, still have work to do
+
+ @@ set IRQ controller so that next op will trigger IRQ
+ mov fiq_rtmp, #0
+@@ -61,7 +60,6 @@ fiq_rx_irq_ack:
+ fiq_rx_end:
+
+ ENTRY(s3c24xx_spi_fiq_txrx)
+-s3c24xx_spi_fiq_txrx:
+ .word fiq_txrx_end - fiq_txrx_start
+ .word fiq_txrx_irq_ack - fiq_txrx_start
+ fiq_txrx_start:
+@@ -76,7 +74,7 @@ fiq_txrx_start:
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+- subnes pc, lr, #4 @@ return, still have work to do
++ subsne pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+@@ -88,7 +86,6 @@ fiq_txrx_irq_ack:
+ fiq_txrx_end:
+
+ ENTRY(s3c24xx_spi_fiq_tx)
+-s3c24xx_spi_fix_tx:
+ .word fiq_tx_end - fiq_tx_start
+ .word fiq_tx_irq_ack - fiq_tx_start
+ fiq_tx_start:
+@@ -101,7 +98,7 @@ fiq_tx_start:
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+- subnes pc, lr, #4 @@ return, still have work to do
++ subsne pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index f39568b28ec1c..3dfb25afa616f 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -522,7 +522,7 @@ config ARM64_ERRATUM_1024718
+ help
+ This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
+
+- Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
++ Affected Cortex-A55 cores (all revisions) could cause incorrect
+ update of the hardware dirty bit when the DBM/AP bits are updated
+ without a break-before-make. The workaround is to disable the usage
+ of hardware DBM locally on the affected cores. CPUs not affected by
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+index c48692b06e1fa..3402cec87035b 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+@@ -32,7 +32,6 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ vmmc-supply = <®_dcdc1>;
+- non-removable;
+ disable-wp;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+index 4b517ca720597..06de0b1ce7267 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+@@ -89,13 +89,12 @@
+ status = "okay";
+ };
+
+-&sd_emmc_a {
+- sd-uhs-sdr50;
+-};
+-
+ &usb {
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
+ };
+ */
+
++&sd_emmc_a {
++ sd-uhs-sdr50;
++};
+diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+index f873dc44ce9ca..55d9b56ac749d 100644
+--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+@@ -164,7 +164,7 @@
+ nand@1800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "brcm,brcmnand-v7.1", "brcm,brcmnand";
++ compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.1", "brcm,brcmnand";
+ reg = <0x1800 0x600>, <0x2000 0x10>;
+ reg-names = "nand", "nand-int-base";
+ interrupts = ;
+diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+index 03486a8ffc67e..4c5106a0860d0 100644
+--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+@@ -388,7 +388,7 @@
+ pmic@66 {
+ compatible = "samsung,s2mps13-pmic";
+ interrupt-parent = <&gpa0>;
+- interrupts = <7 IRQ_TYPE_NONE>;
++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x66>;
+ samsung,s2mps11-wrstbi-ground;
+
+diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+index 695d4c1406466..125c03f351d97 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
++++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+@@ -90,7 +90,7 @@
+ pmic@66 {
+ compatible = "samsung,s2mps15-pmic";
+ reg = <0x66>;
+- interrupts = <2 IRQ_TYPE_NONE>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpa0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq>;
+diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+index e1c0fcba5c206..07c099b4ed5b5 100644
+--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
++++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+@@ -166,7 +166,7 @@
+ rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 2>;
+- altr,sysmgr-syscon = <&sysmgr 0x48 8>;
++ altr,sysmgr-syscon = <&sysmgr 0x48 0>;
+ clocks = <&clkmgr AGILEX_EMAC1_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>;
+ clock-names = "stmmaceth", "ptp_ref";
+ status = "disabled";
+@@ -184,7 +184,7 @@
+ rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 3>;
+- altr,sysmgr-syscon = <&sysmgr 0x4c 16>;
++ altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
+ clocks = <&clkmgr AGILEX_EMAC2_CLK>, <&clkmgr AGILEX_EMAC_PTP_CLK>;
+ clock-names = "stmmaceth", "ptp_ref";
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index f5ec3b6447692..d239ab70ed995 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -205,7 +205,7 @@
+ };
+
+ partition@20000 {
+- label = "u-boot";
++ label = "a53-firmware";
+ reg = <0x20000 0x160000>;
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 5b9ec032ce8d8..7c6d871538a63 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -698,6 +698,8 @@
+ clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
+ <&topckgen CLK_TOP_AXI_SEL>;
+ clock-names = "source", "hclk";
++ resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
++ reset-names = "hrst";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 5b782a4769e7e..36a90dd2fa7c6 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -6,7 +6,7 @@
+ */
+
+ #include
+-#include
++#include
+ #include
+ #include
+ #include
+@@ -661,6 +661,7 @@
+ compatible = "mediatek,mt8183-disp-pwm";
+ reg = <0 0x1100e000 0 0x1000>;
+ interrupts = ;
++ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+ #pwm-cells = <2>;
+ clocks = <&topckgen CLK_TOP_MUX_DISP_PWM>,
+ <&infracfg CLK_INFRA_DISP_PWM>;
+@@ -1011,7 +1012,7 @@
+ clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+ mediatek,larb = <&larb0>;
+- mediatek,rdma_fifo_size = <5120>;
++ mediatek,rdma-fifo-size = <5120>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xb000 0x1000>;
+ };
+
+@@ -1023,7 +1024,7 @@
+ clocks = <&mmsys CLK_MM_DISP_RDMA1>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+ mediatek,larb = <&larb0>;
+- mediatek,rdma_fifo_size = <2048>;
++ mediatek,rdma-fifo-size = <2048>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xc000 0x1000>;
+ };
+
+@@ -1055,8 +1056,7 @@
+ };
+
+ gamma0: gamma@14011000 {
+- compatible = "mediatek,mt8183-disp-gamma",
+- "mediatek,mt8173-disp-gamma";
++ compatible = "mediatek,mt8183-disp-gamma";
+ reg = <0 0x14011000 0 0x1000>;
+ interrupts = ;
+ power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+index f91269492d729..f1af798abd749 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+@@ -106,6 +106,9 @@
+ interrupt-parent = <&msmgpio>;
+ interrupts = <115 IRQ_TYPE_EDGE_RISING>;
+
++ vdd-supply = <&pm8916_l17>;
++ vddio-supply = <&pm8916_l5>;
++
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_int_default>;
+ };
+@@ -113,6 +116,9 @@
+ magnetometer@12 {
+ compatible = "bosch,bmc150_magn";
+ reg = <0x12>;
++
++ vdd-supply = <&pm8916_l17>;
++ vddio-supply = <&pm8916_l5>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+index e39c04d977c25..dd35c3344358c 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
++++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+@@ -38,7 +38,7 @@
+
+ &pronto {
+ iris {
+- compatible = "qcom,wcn3680";
++ compatible = "qcom,wcn3660b";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 402e891a84ab6..d25f6dc751e99 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -56,7 +56,7 @@
+ no-map;
+ };
+
+- reserved@8668000 {
++ reserved@86680000 {
+ reg = <0x0 0x86680000 0x0 0x80000>;
+ no-map;
+ };
+@@ -69,7 +69,7 @@
+ qcom,client-id = <1>;
+ };
+
+- rfsa@867e00000 {
++ rfsa@867e0000 {
+ reg = <0x0 0x867e0000 0x0 0x20000>;
+ no-map;
+ };
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index ce22d4fa383e6..f13a63ca8efd6 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -122,7 +122,7 @@
+
+ &apps_rsc {
+ pm8009-rpmh-regulators {
+- compatible = "qcom,pm8009-rpmh-regulators";
++ compatible = "qcom,pm8009-1-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-s1-supply = <&vph_pwr>;
+@@ -131,6 +131,13 @@
+ vdd-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_s4a_1p8>;
+
++ vreg_s2f_0p95: smps2 {
++ regulator-name = "vreg_s2f_0p95";
++ regulator-min-microvolt = <900000>;
++ regulator-max-microvolt = <952000>;
++ regulator-initial-mode = ;
++ };
++
+ vreg_l1f_1p1: ldo1 {
+ regulator-name = "vreg_l1f_1p1";
+ regulator-min-microvolt = <1104000>;
+@@ -491,8 +498,6 @@
+ vqmmc-supply = <&vreg_l6c_2p96>;
+ cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+- /* there seem to be issues with HS400-1.8V mode, so disable it */
+- no-1-8-v;
+ no-sdio;
+ no-emmc;
+ };
+@@ -706,13 +711,13 @@
+ cmd {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+- drive-strength = <16>;
++ drive-strength = <10>;
+ };
+
+ data {
+ pins = "sdc2_data";
+ bias-pull-up;
+- drive-strength = <16>;
++ drive-strength = <10>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index c0b93813ea9ac..c4ac6f5dc008d 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -1114,11 +1114,11 @@
+ reg = <0x10>;
+
+ // CAM0_RST_N
+- reset-gpios = <&tlmm 9 0>;
++ reset-gpios = <&tlmm 9 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_default>;
+ gpios = <&tlmm 13 0>,
+- <&tlmm 9 0>;
++ <&tlmm 9 GPIO_ACTIVE_LOW>;
+
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "xvclk";
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 65acd1f381eba..1ae90e8b70f32 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -1657,7 +1657,7 @@
+
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>,
+- <&xo_board>;
++ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "core", "xo";
+ iommus = <&apps_smmu 0x4a0 0x0>;
+ qcom,dll-config = <0x0007642c>;
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+index e66b5b36e4894..759734b7715bd 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+@@ -150,7 +150,7 @@
+ regulator-name = "audio-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+- gpio = <&gpio_exp2 7 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio_exp4 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+index 8ac167aa18f04..ea937a926c0e3 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+@@ -89,7 +89,6 @@
+ pinctrl-names = "default";
+ uart-has-rtscts;
+ status = "okay";
+- max-speed = <4000000>;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+@@ -98,6 +97,7 @@
+ device-wakeup-gpios = <&pca9654 5 GPIO_ACTIVE_HIGH>;
+ clocks = <&osc_32k>;
+ clock-names = "extclk";
++ max-speed = <4000000>;
+ };
+ };
+
+@@ -148,7 +148,7 @@
+ };
+
+ eeprom@50 {
+- compatible = "microchip,at24c64", "atmel,24c64";
++ compatible = "microchip,24c64", "atmel,24c64";
+ pagesize = <32>;
+ read-only; /* Manufacturing EEPROM programmed at factory */
+ reg = <0x50>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index db0d5c8e5f96a..93c734d8a46c2 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -928,6 +928,7 @@
+ phy-mode = "rmii";
+ phy-handle = <&phy>;
+ snps,txpbl = <0x4>;
++ clock_in_out = "output";
+ status = "disabled";
+
+ mdio {
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index 34b8a89197be3..cafb5b96be0e6 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -55,7 +55,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+ #define aes_mac_update neon_aes_mac_update
+ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+ #endif
+-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
++#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
+ MODULE_ALIAS_CRYPTO("ecb(aes)");
+ MODULE_ALIAS_CRYPTO("cbc(aes)");
+ MODULE_ALIAS_CRYPTO("ctr(aes)");
+@@ -650,7 +650,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
+ }
+
+ static struct skcipher_alg aes_algs[] = { {
+-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
++#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
+ .base = {
+ .cra_name = "__ecb(aes)",
+ .cra_driver_name = "__ecb-aes-" MODE,
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index c93121bcfdeba..c1362861765fb 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -19,6 +19,7 @@
+ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel ");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha1");
+
+ struct sha1_ce_state {
+ struct sha1_state sst;
+diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
+index 31ba3da5e61bd..ded3a6488f817 100644
+--- a/arch/arm64/crypto/sha2-ce-glue.c
++++ b/arch/arm64/crypto/sha2-ce-glue.c
+@@ -19,6 +19,8 @@
+ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel ");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha224");
++MODULE_ALIAS_CRYPTO("sha256");
+
+ struct sha256_ce_state {
+ struct sha256_state sst;
+diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
+index e5a2936f08864..7288d30463548 100644
+--- a/arch/arm64/crypto/sha3-ce-glue.c
++++ b/arch/arm64/crypto/sha3-ce-glue.c
+@@ -23,6 +23,10 @@
+ MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel ");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha3-224");
++MODULE_ALIAS_CRYPTO("sha3-256");
++MODULE_ALIAS_CRYPTO("sha3-384");
++MODULE_ALIAS_CRYPTO("sha3-512");
+
+ asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ int md_len);
+diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
+index faa83f6cf376c..a6b1adf31c56b 100644
+--- a/arch/arm64/crypto/sha512-ce-glue.c
++++ b/arch/arm64/crypto/sha512-ce-glue.c
+@@ -23,6 +23,8 @@
+ MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel ");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha384");
++MODULE_ALIAS_CRYPTO("sha512");
+
+ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
+diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
+index 691f15af788e4..810045628c66e 100644
+--- a/arch/arm64/include/asm/module.lds.h
++++ b/arch/arm64/include/asm/module.lds.h
+@@ -1,7 +1,7 @@
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+ SECTIONS {
+- .plt (NOLOAD) : { BYTE(0) }
+- .init.plt (NOLOAD) : { BYTE(0) }
+- .text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
++ .plt 0 (NOLOAD) : { BYTE(0) }
++ .init.plt 0 (NOLOAD) : { BYTE(0) }
++ .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
+ }
+ #endif
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 3e6331b649323..33b6f56dcb21b 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1455,7 +1455,7 @@ static bool cpu_has_broken_dbm(void)
+ /* List of CPUs which have broken DBM support. */
+ static const struct midr_range cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_1024718
+- MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+ #endif
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index a0dc987724eda..7ec430e18f95e 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -882,6 +882,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
+
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
++ isb
+
+ msr sctlr_el1, x19 // re-enable the MMU
+ isb
+diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
+index 03210f6447900..0cde47a63bebf 100644
+--- a/arch/arm64/kernel/machine_kexec_file.c
++++ b/arch/arm64/kernel/machine_kexec_file.c
+@@ -182,8 +182,10 @@ static int create_dtb(struct kimage *image,
+
+ /* duplicate a device tree blob */
+ ret = fdt_open_into(initial_boot_params, buf, buf_size);
+- if (ret)
++ if (ret) {
++ vfree(buf);
+ return -EINVAL;
++ }
+
+ ret = setup_dtb(image, initrd_load_addr, initrd_len,
+ cmdline, buf);
+diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
+index a412d8edbcd24..2c247634552b1 100644
+--- a/arch/arm64/kernel/probes/uprobes.c
++++ b/arch/arm64/kernel/probes/uprobes.c
+@@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+
+ /* TODO: Currently we do not support AARCH32 instruction probing */
+ if (mm->context.flags & MMCF_AARCH32)
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ return -EINVAL;
+
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 8ac487c84e379..1d75471979cb1 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1796,7 +1796,7 @@ int syscall_trace_enter(struct pt_regs *regs)
+
+ if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+- if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
++ if (flags & _TIF_SYSCALL_EMU)
+ return NO_SYSCALL;
+ }
+
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index a67b37a7a47e1..d7564891ffe12 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ if (!ret)
+ ret = -EOPNOTSUPP;
+ } else {
+- __cpu_suspend_exit();
++ RCU_NONIDLE(__cpu_suspend_exit());
+ }
+
+ unpause_graph_tracing();
+diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
+index d822144906ac1..a4cf2e2ac15ac 100644
+--- a/arch/csky/kernel/ptrace.c
++++ b/arch/csky/kernel/ptrace.c
+@@ -83,7 +83,7 @@ static int gpr_get(struct task_struct *target,
+ /* Abiv1 regs->tls is fake and we need sync here. */
+ regs->tls = task_thread_info(target)->tp_value;
+
+- return membuf_write(&to, regs, sizeof(regs));
++ return membuf_write(&to, regs, sizeof(*regs));
+ }
+
+ static int gpr_set(struct task_struct *target,
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index cd4343edeb11b..5ffdd67093bc6 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -136,6 +136,25 @@ cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
+ #
+ cflags-y += -fno-stack-check
+
++# binutils from v2.35 when built with --enable-mips-fix-loongson3-llsc=yes,
++# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll
++# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h
++# for a description).
++#
++# We disable this in order to prevent the assembler meddling with the
++# instruction that labels refer to, ie. if we label an ll instruction:
++#
++# 1: ll v0, 0(a0)
++#
++# ...then with the assembler fix applied the label may actually point at a sync
++# instruction inserted by the assembler, and if we were using the label in an
++# exception table the table would no longer contain the address of the ll
++# instruction.
++#
++# Avoid this by explicitly disabling that assembler behaviour.
++#
++cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
++
+ #
+ # CPU-dependent compiler/assembler options for optimization.
+ #
+diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
+index 47cd9dc7454af..f93f72bcba97e 100644
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -37,6 +37,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+ KCOV_INSTRUMENT := n
+ GCOV_PROFILE := n
++UBSAN_SANITIZE := n
+
+ # decompressor objects (linked with vmlinuz)
+ vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index 982826ba0ef70..ce4e2806159bb 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -1149,12 +1149,15 @@ void __init device_tree_init(void)
+ bool do_prune;
+ bool fill_mac;
+
+- if (fw_passed_dtb) {
+- fdt = (void *)fw_passed_dtb;
++#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
++ if (!fdt_check_header(&__appended_dtb)) {
++ fdt = &__appended_dtb;
+ do_prune = false;
+ fill_mac = true;
+ pr_info("Using appended Device Tree.\n");
+- } else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
++ } else
++#endif
++ if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+ fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
+ if (fdt_check_header(fdt))
+ panic("Corrupt Device Tree passed to kernel.");
+diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
+index 3682d1a0bb808..ea4b62ece3366 100644
+--- a/arch/mips/include/asm/asm.h
++++ b/arch/mips/include/asm/asm.h
+@@ -20,10 +20,27 @@
+ #include
+ #include
+
++#ifndef __VDSO__
++/*
++ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
++ * We don't do DWARF unwinding at runtime, so only the offline DWARF
++ * information is useful to anyone. Note we should change this if we
++ * ever decide to enable DWARF unwinding at runtime.
++ */
++#define CFI_SECTIONS .cfi_sections .debug_frame
++#else
++ /*
++ * For the vDSO, emit both runtime unwind information and debug
++ * symbols for the .dbg file.
++ */
++#define CFI_SECTIONS
++#endif
++
+ /*
+ * LEAF - declare leaf routine
+ */
+ #define LEAF(symbol) \
++ CFI_SECTIONS; \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+@@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \
+ * NESTED - declare nested routine entry point
+ */
+ #define NESTED(symbol, framesize, rpc) \
++ CFI_SECTIONS; \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index f904084fcb1fd..27ad767915390 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -248,7 +248,7 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+ * bltz that can branch to code outside of the LL/SC loop. As \
+ * such, we don't need to emit another barrier here. \
+ */ \
+- if (!__SYNC_loongson3_war) \
++ if (__SYNC_loongson3_war == 0) \
+ smp_mb__after_atomic(); \
+ \
+ return result; \
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index 5b0b3a6777ea5..ed8f3f3c4304a 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -99,7 +99,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+- if (!__SYNC_loongson3_war) \
++ if (__SYNC_loongson3_war == 0) \
+ smp_mb__before_llsc(); \
+ \
+ __res = (__typeof__(*(ptr))) \
+@@ -191,7 +191,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+- if (!__SYNC_loongson3_war) \
++ if (__SYNC_loongson3_war == 0) \
+ smp_mb__before_llsc(); \
+ \
+ __res = cmpxchg_local((ptr), (old), (new)); \
+@@ -201,7 +201,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ * contains a completion barrier after the SC, so we don't \
+ * need to emit an extra one here. \
+ */ \
+- if (!__SYNC_loongson3_war) \
++ if (__SYNC_loongson3_war == 0) \
+ smp_llsc_mb(); \
+ \
+ __res; \
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 6a77bc4a6eec4..74082e35d57c8 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -255,6 +255,12 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
+
+ #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
+
++extern unsigned long __kaslr_offset;
++static inline unsigned long kaslr_offset(void)
++{
++ return __kaslr_offset;
++}
++
+ #include
+ #include
+
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index e6853697a0561..31cb9199197ca 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1830,16 +1830,17 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+ */
+ case PRID_COMP_INGENIC_D0:
+ c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+- break;
++ fallthrough;
+
+ /*
+ * The config0 register in the XBurst CPUs with a processor ID of
+- * PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this
+- * mode is not compatible with the MIPS standard, it will cause
+- * tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S)
+- * when starting the init process. After chip reset, the default
+- * is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to
+- * switch back to VTLB mode to prevent getting stuck.
++ * PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned
++ * huge page tlb mode, this mode is not compatible with the MIPS
++ * standard, it will cause tlbmiss and into an infinite loop
++ * (line 21 in the tlb-funcs.S) when starting the init process.
++ * After chip reset, the default is HPTLB mode, Write 0xa9000000
++ * to cp0 register 5 sel 4 to switch back to VTLB mode to prevent
++ * getting stuck.
+ */
+ case PRID_COMP_INGENIC_D1:
+ write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
+index 0e365b7c742d9..ac16cf2716df5 100644
+--- a/arch/mips/kernel/relocate.c
++++ b/arch/mips/kernel/relocate.c
+@@ -300,6 +300,13 @@ static inline int __init relocation_addr_valid(void *loc_new)
+ return 1;
+ }
+
++static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
++{
++ unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
++
++ *new_addr = (unsigned long)offset;
++}
++
+ #if defined(CONFIG_USE_OF)
+ void __weak *plat_get_fdt(void)
+ {
+@@ -410,6 +417,9 @@ void *__init relocate_kernel(void)
+
+ /* Return the new kernel's entry point */
+ kernel_entry = RELOCATED(start_kernel);
++
++ /* Error may occur before, so keep it at last */
++ update_kaslr_offset(&__kaslr_offset, offset);
+ }
+ out:
+ return kernel_entry;
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 7e1f8e2774373..83ec0d5a0918b 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -84,6 +84,9 @@ static struct resource code_resource = { .name = "Kernel code", };
+ static struct resource data_resource = { .name = "Kernel data", };
+ static struct resource bss_resource = { .name = "Kernel bss", };
+
++unsigned long __kaslr_offset __ro_after_init;
++EXPORT_SYMBOL(__kaslr_offset);
++
+ static void *detect_magic __initdata = detect_memory_region;
+
+ #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index 5e97e9d02f98d..09fa4705ce8eb 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -90,6 +90,7 @@ SECTIONS
+
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
++ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ DATA_DATA
+@@ -223,6 +224,5 @@ SECTIONS
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+- *(.eh_frame)
+ }
+ }
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index df8eed3875f6d..43c2f271e6ab4 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -302,7 +302,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+
+ /* if this is a EBU irq, we need to ack it or get a deadlock */
+- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
++ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
+ LTQ_EBU_PCC_ISTAT);
+ }
+diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
+index ec42c5085905c..e2354e128d9a0 100644
+--- a/arch/mips/loongson64/Platform
++++ b/arch/mips/loongson64/Platform
+@@ -5,28 +5,6 @@
+
+ cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
+
+-#
+-# Some versions of binutils, not currently mainline as of 2019/02/04, support
+-# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+-# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
+-# description).
+-#
+-# We disable this in order to prevent the assembler meddling with the
+-# instruction that labels refer to, ie. if we label an ll instruction:
+-#
+-# 1: ll v0, 0(a0)
+-#
+-# ...then with the assembler fix applied the label may actually point at a sync
+-# instruction inserted by the assembler, and if we were using the label in an
+-# exception table the table would no longer contain the address of the ll
+-# instruction.
+-#
+-# Avoid this by explicitly disabling that assembler behaviour. If upstream
+-# binutils does not merge support for the flag then we can revisit & remove
+-# this later - for now it ensures vendor toolchains don't cause problems.
+-#
+-cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+-
+ #
+ # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+ # as MIPS64 R2; older versions as just R1. This leaves the possibility open
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 4f976d687ab00..f67297b3175fe 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -1593,7 +1593,7 @@ static int probe_scache(void)
+ return 1;
+ }
+
+-static void __init loongson2_sc_init(void)
++static void loongson2_sc_init(void)
+ {
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
+index 5810cc12bc1d9..2131d3fd73333 100644
+--- a/arch/mips/vdso/Makefile
++++ b/arch/mips/vdso/Makefile
+@@ -16,16 +16,13 @@ ccflags-vdso := \
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
+ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
++ $(CLANG_FLAGS) \
+ -D__VDSO__
+
+ ifndef CONFIG_64BIT
+ ccflags-vdso += -DBUILD_VDSO32
+ endif
+
+-ifdef CONFIG_CC_IS_CLANG
+-ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
+-endif
+-
+ #
+ # The -fno-jump-tables flag only prevents the compiler from generating
+ # jump tables but does not prevent the compiler from emitting absolute
+diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
+index da8442450e460..0794cd7803dfe 100644
+--- a/arch/nios2/kernel/entry.S
++++ b/arch/nios2/kernel/entry.S
+@@ -389,7 +389,10 @@ ENTRY(ret_from_interrupt)
+ */
+ ENTRY(sys_clone)
+ SAVE_SWITCH_STACK
++ subi sp, sp, 4 /* make space for tls pointer */
++ stw r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */
+ call nios2_clone
++ addi sp, sp, 4
+ RESTORE_SWITCH_STACK
+ ret
+
+diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c
+index cd390ec4f88bf..b1ca856999521 100644
+--- a/arch/nios2/kernel/sys_nios2.c
++++ b/arch/nios2/kernel/sys_nios2.c
+@@ -22,6 +22,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
+ unsigned int op)
+ {
+ struct vm_area_struct *vma;
++ struct mm_struct *mm = current->mm;
+
+ if (len == 0)
+ return 0;
+@@ -34,16 +35,22 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
+ if (addr + len < addr)
+ return -EFAULT;
+
++ if (mmap_read_lock_killable(mm))
++ return -EINTR;
++
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+- vma = find_vma(current->mm, addr);
+- if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
++ vma = find_vma(mm, addr);
++ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
++ mmap_read_unlock(mm);
+ return -EFAULT;
++ }
+
+ flush_cache_range(vma, addr, addr + len);
+
++ mmap_read_unlock(mm);
+ return 0;
+ }
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 107bb4319e0e0..a685e42d39932 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -772,7 +772,7 @@ config PPC_64K_PAGES
+
+ config PPC_256K_PAGES
+ bool "256k page size"
+- depends on 44x && !STDBINUTILS
++ depends on 44x && !STDBINUTILS && !PPC_47x
+ help
+ Make the page size 256k.
+
+diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
+index 55d6ede30c19a..9ab344d29a545 100644
+--- a/arch/powerpc/include/asm/kexec.h
++++ b/arch/powerpc/include/asm/kexec.h
+@@ -136,6 +136,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
+ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr);
++unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image);
+ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len, const char *cmdline);
+diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
+index edc08f04aef77..5d1726bb28e79 100644
+--- a/arch/powerpc/include/asm/paravirt.h
++++ b/arch/powerpc/include/asm/paravirt.h
+@@ -10,6 +10,7 @@
+ #endif
+
+ #ifdef CONFIG_PPC_SPLPAR
++#include
+ #include
+ #include
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 501c9a79038c0..f53bfefb4a577 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -216,8 +216,6 @@ do { \
+ #define __put_user_nocheck_goto(x, ptr, size, label) \
+ do { \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+- if (!is_kernel_addr((unsigned long)__pu_addr)) \
+- might_fault(); \
+ __chk_user_ptr(ptr); \
+ __put_user_size_goto((x), __pu_addr, (size), label); \
+ } while (0)
+@@ -313,7 +311,7 @@ do { \
+ __typeof__(size) __gu_size = (size); \
+ \
+ __chk_user_ptr(__gu_addr); \
+- if (!is_kernel_addr((unsigned long)__gu_addr)) \
++ if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
+ might_fault(); \
+ barrier_nospec(); \
+ if (do_allow) \
+@@ -508,6 +506,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
+ {
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
++
++ might_fault();
++
+ allow_read_write_user((void __user *)ptr, ptr, len);
+ return true;
+ }
+@@ -521,6 +522,9 @@ user_read_access_begin(const void __user *ptr, size_t len)
+ {
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
++
++ might_fault();
++
+ allow_read_from_user(ptr, len);
+ return true;
+ }
+@@ -532,6 +536,9 @@ user_write_access_begin(const void __user *ptr, size_t len)
+ {
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
++
++ might_fault();
++
+ allow_write_to_user((void __user *)ptr, len);
+ return true;
+ }
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 1c9b0ccc2172e..9bc4e7dd0beef 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -356,6 +356,9 @@ trace_syscall_entry_irq_off:
+
+ .globl transfer_to_syscall
+ transfer_to_syscall:
++#ifdef CONFIG_PPC_BOOK3S_32
++ kuep_lock r11, r12
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ andi. r12,r9,MSR_EE
+ beq- trace_syscall_entry_irq_off
+diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
+index a2f72c966bafb..abc7b603ab65c 100644
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -47,7 +47,7 @@
+ lwz r1,TASK_STACK-THREAD(r1)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ 1:
+- mtcrf 0x7f, r1
++ mtcrf 0x3f, r1
+ bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
+ #else
+ subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 52702f3db6df6..9eb63cf6ac38e 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -165,7 +165,7 @@ SystemCall:
+ /* On the MPC8xx, this is a software emulation interrupt. It occurs
+ * for all unimplemented and illegal instructions.
+ */
+- EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
++ EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
+
+ . = 0x1100
+ /*
+diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
+index 858fbc8b19f32..bc57e3a82d689 100644
+--- a/arch/powerpc/kernel/head_book3s_32.S
++++ b/arch/powerpc/kernel/head_book3s_32.S
+@@ -278,12 +278,6 @@ MachineCheck:
+ 7: EXCEPTION_PROLOG_2
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ #ifdef CONFIG_PPC_CHRP
+-#ifdef CONFIG_VMAP_STACK
+- mfspr r4, SPRN_SPRG_THREAD
+- tovirt(r4, r4)
+- lwz r4, RTAS_SP(r4)
+- cmpwi cr1, r4, 0
+-#endif
+ beq cr1, machine_check_tramp
+ twi 31, 0, 0
+ #else
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index cc7a6271b6b4e..e8a548447dd68 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -269,6 +269,31 @@ again:
+ }
+ }
+
++#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
++static inline void replay_soft_interrupts_irqrestore(void)
++{
++ unsigned long kuap_state = get_kuap();
++
++ /*
++ * Check if anything calls local_irq_enable/restore() when KUAP is
++ * disabled (user access enabled). We handle that case here by saving
++ * and re-locking AMR but we shouldn't get here in the first place,
++ * hence the warning.
++ */
++ kuap_check_amr();
++
++ if (kuap_state != AMR_KUAP_BLOCKED)
++ set_kuap(AMR_KUAP_BLOCKED);
++
++ replay_soft_interrupts();
++
++ if (kuap_state != AMR_KUAP_BLOCKED)
++ set_kuap(kuap_state);
++}
++#else
++#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
++#endif
++
+ notrace void arch_local_irq_restore(unsigned long mask)
+ {
+ unsigned char irq_happened;
+@@ -332,7 +357,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+- replay_soft_interrupts();
++ replay_soft_interrupts_irqrestore();
+ local_paca->irq_happened = 0;
+
+ trace_hardirqs_on();
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index e9d4eb6144e1f..ccf77b985c8f6 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -1331,14 +1331,10 @@ static void __init prom_check_platform_support(void)
+ if (prop_len > sizeof(vec))
+ prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
+ prop_len);
+- prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+- &vec, sizeof(vec));
+- for (i = 0; i < sizeof(vec); i += 2) {
+- prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
+- , vec[i]
+- , vec[i + 1]);
+- prom_parse_platform_support(vec[i], vec[i + 1],
+- &supported);
++ prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
++ for (i = 0; i < prop_len; i += 2) {
++ prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
++ prom_parse_platform_support(vec[i], vec[i + 1], &supported);
+ }
+ }
+
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 67feb35244606..83633a24ce788 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -53,6 +53,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+
+@@ -1030,6 +1031,7 @@ void __init time_init(void)
+ tick_setup_hrtimer_broadcast();
+
+ of_clk_init(NULL);
++ enable_sched_clock_irqtime();
+ }
+
+ /*
+diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
+index d0e459bb2f05a..9842e33533df1 100644
+--- a/arch/powerpc/kexec/elf_64.c
++++ b/arch/powerpc/kexec/elf_64.c
+@@ -102,7 +102,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
+ pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
+ }
+
+- fdt_size = fdt_totalsize(initial_boot_params) * 2;
++ fdt_size = kexec_fdt_totalsize_ppc64(image);
+ fdt = kmalloc(fdt_size, GFP_KERNEL);
+ if (!fdt) {
+ pr_err("Not enough memory for the device tree.\n");
+diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
+index c69bcf9b547a8..02b9e4d0dc40b 100644
+--- a/arch/powerpc/kexec/file_load_64.c
++++ b/arch/powerpc/kexec/file_load_64.c
+@@ -21,6 +21,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -925,6 +926,40 @@ out:
+ return ret;
+ }
+
++/**
++ * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to setup FDT
++ * for kexec/kdump kernel.
++ * @image: kexec image being loaded.
++ *
++ * Returns the estimated size needed for kexec/kdump kernel FDT.
++ */
++unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
++{
++ unsigned int fdt_size;
++ u64 usm_entries;
++
++ /*
++ * The below estimate more than accounts for a typical kexec case where
++ * the additional space is to accommodate things like kexec cmdline,
++ * chosen node with properties for initrd start & end addresses and
++ * a property to indicate kexec boot..
++ */
++ fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE);
++ if (image->type != KEXEC_TYPE_CRASH)
++ return fdt_size;
++
++ /*
++ * For kdump kernel, also account for linux,usable-memory and
++ * linux,drconf-usable-memory properties. Get an approximate on the
++ * number of usable memory entries and use for FDT size estimation.
++ */
++ usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
++ (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
++ fdt_size += (unsigned int)(usm_entries * sizeof(u64));
++
++ return fdt_size;
++}
++
+ /**
+ * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
+ * being loaded.
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index 549591d9aaa2c..e45644657d49d 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -54,6 +54,7 @@ config KVM_BOOK3S_32
+ select KVM
+ select KVM_BOOK3S_32_HANDLER
+ select KVM_BOOK3S_PR_POSSIBLE
++ select PPC_FPU
+ help
+ Support running unmodified book3s_32 guest kernels
+ in virtual machines on book3s_32 host processors.
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index cf52d26f49cd7..25966ae3271ef 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1518,7 +1518,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
+ return emulated;
+ }
+
+-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+@@ -1536,7 +1536,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ return result;
+ }
+
+-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+@@ -1554,7 +1554,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ return result;
+ }
+
+-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+@@ -1572,7 +1572,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ return result;
+ }
+
+-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index ede093e962347..bb5c20d4ca91c 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -1306,9 +1306,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ if ((word & 0xfe2) == 2)
+ op->type = SYSCALL;
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+- (word & 0xfe3) == 1)
++ (word & 0xfe3) == 1) { /* scv */
+ op->type = SYSCALL_VECTORED_0;
+- else
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
++ } else
+ op->type = UNKNOWN;
+ return 0;
+ #endif
+@@ -1412,7 +1414,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #ifdef __powerpc64__
+ case 1:
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+- return -1;
++ goto unknown_opcode;
+
+ prefix_r = GET_PREFIX_R(word);
+ ra = GET_PREFIX_RA(suffix);
+@@ -1445,8 +1447,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ #ifdef __powerpc64__
+ case 4:
++ /*
++ * There are very many instructions with this primary opcode
++ * introduced in the ISA as early as v2.03. However, the ones
++ * we currently emulate were all introduced with ISA 3.0
++ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+
+ switch (word & 0x3f) {
+ case 48: /* maddhd */
+@@ -1472,7 +1479,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ * There are other instructions from ISA 3.0 with the same
+ * primary opcode which do not have emulation support yet.
+ */
+- return -1;
++ goto unknown_opcode;
+ #endif
+
+ case 7: /* mulli */
+@@ -1532,6 +1539,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ case 19:
+ if (((word >> 1) & 0x1f) == 2) {
+ /* addpcis */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ imm = (short) (word & 0xffc1); /* d0 + d2 fields */
+ imm |= (word >> 15) & 0x3e; /* d1 field */
+ op->val = regs->nip + (imm << 16) + 4;
+@@ -1844,7 +1853,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #ifdef __powerpc64__
+ case 265: /* modud */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ op->val = regs->gpr[ra] % regs->gpr[rb];
+ goto compute_done;
+ #endif
+@@ -1854,7 +1863,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ case 267: /* moduw */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ op->val = (unsigned int) regs->gpr[ra] %
+ (unsigned int) regs->gpr[rb];
+ goto compute_done;
+@@ -1891,7 +1900,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #endif
+ case 755: /* darn */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ switch (ra & 0x3) {
+ case 0:
+ /* 32-bit conditioned */
+@@ -1909,18 +1918,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ goto compute_done;
+ }
+
+- return -1;
++ goto unknown_opcode;
+ #ifdef __powerpc64__
+ case 777: /* modsd */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ op->val = (long int) regs->gpr[ra] %
+ (long int) regs->gpr[rb];
+ goto compute_done;
+ #endif
+ case 779: /* modsw */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ op->val = (int) regs->gpr[ra] %
+ (int) regs->gpr[rb];
+ goto compute_done;
+@@ -1997,14 +2006,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #endif
+ case 538: /* cnttzw */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ val = (unsigned int) regs->gpr[rd];
+ op->val = (val ? __builtin_ctz(val) : 32);
+ goto logical_done;
+ #ifdef __powerpc64__
+ case 570: /* cnttzd */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ val = regs->gpr[rd];
+ op->val = (val ? __builtin_ctzl(val) : 64);
+ goto logical_done;
+@@ -2114,7 +2123,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ case 890: /* extswsli with sh_5 = 0 */
+ case 891: /* extswsli with sh_5 = 1 */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+- return -1;
++ goto unknown_opcode;
+ op->type = COMPUTE + SETREG;
+ sh = rb | ((word & 2) << 4);
+ val = (signed int) regs->gpr[rd];
+@@ -2441,6 +2450,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 268: /* lxvx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 16;
+@@ -2450,6 +2461,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ case 269: /* lxvl */
+ case 301: { /* lxvll */
+ int nb;
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->ea = ra ? regs->gpr[ra] : 0;
+ nb = regs->gpr[rb] & 0xff;
+@@ -2470,13 +2483,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ case 333: /* lxvpx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+- return -1;
++ goto unknown_opcode;
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->type = MKOP(LOAD_VSX, 0, 32);
+ op->element_size = 32;
+ break;
+
+ case 364: /* lxvwsx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 4);
+ op->element_size = 4;
+@@ -2484,6 +2499,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 396: /* stxvx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 16;
+@@ -2493,6 +2510,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ case 397: /* stxvl */
+ case 429: { /* stxvll */
+ int nb;
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->ea = ra ? regs->gpr[ra] : 0;
+ nb = regs->gpr[rb] & 0xff;
+@@ -2506,7 +2525,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ }
+ case 461: /* stxvpx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+- return -1;
++ goto unknown_opcode;
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->type = MKOP(STORE_VSX, 0, 32);
+ op->element_size = 32;
+@@ -2544,6 +2563,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 781: /* lxsibzx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 1);
+ op->element_size = 8;
+@@ -2551,6 +2572,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 812: /* lxvh8x */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 2;
+@@ -2558,6 +2581,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 813: /* lxsihzx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 2);
+ op->element_size = 8;
+@@ -2571,6 +2596,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 876: /* lxvb16x */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 1;
+@@ -2584,6 +2611,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 909: /* stxsibx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 1);
+ op->element_size = 8;
+@@ -2591,6 +2620,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 940: /* stxvh8x */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 2;
+@@ -2598,6 +2629,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 941: /* stxsihx */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 2);
+ op->element_size = 8;
+@@ -2611,6 +2644,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 1004: /* stxvb16x */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 1;
+@@ -2719,12 +2754,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ op->type = MKOP(LOAD_FP, 0, 16);
+ break;
+ case 2: /* lxsd */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, 0, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 3: /* lxssp */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, 0, 4);
+ op->element_size = 8;
+@@ -2754,7 +2793,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ #ifdef CONFIG_VSX
+ case 6:
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+- return -1;
++ goto unknown_opcode;
+ op->ea = dqform_ea(word, regs);
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->element_size = 32;
+@@ -2777,6 +2816,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 1: /* lxv */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->ea = dqform_ea(word, regs);
+ if (word & 8)
+ op->reg = rd + 32;
+@@ -2787,6 +2828,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ case 2: /* stxsd with LSB of DS field = 0 */
+ case 6: /* stxsd with LSB of DS field = 1 */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->ea = dsform_ea(word, regs);
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, 0, 8);
+@@ -2796,6 +2839,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ case 3: /* stxssp with LSB of DS field = 0 */
+ case 7: /* stxssp with LSB of DS field = 1 */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->ea = dsform_ea(word, regs);
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, 0, 4);
+@@ -2804,6 +2849,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+
+ case 5: /* stxv */
++ if (!cpu_has_feature(CPU_FTR_ARCH_300))
++ goto unknown_opcode;
+ op->ea = dqform_ea(word, regs);
+ if (word & 8)
+ op->reg = rd + 32;
+@@ -2833,7 +2880,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ break;
+ case 1: /* Prefixed instructions */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+- return -1;
++ goto unknown_opcode;
+
+ prefix_r = GET_PREFIX_R(word);
+ ra = GET_PREFIX_RA(suffix);
+@@ -2972,6 +3019,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ }
+
++ if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
++ switch (GETTYPE(op->type)) {
++ case LOAD:
++ if (ra == rd)
++ goto unknown_opcode;
++ fallthrough;
++ case STORE:
++ case LOAD_FP:
++ case STORE_FP:
++ if (ra == 0)
++ goto unknown_opcode;
++ }
++ }
++
+ #ifdef CONFIG_VSX
+ if ((GETTYPE(op->type) == LOAD_VSX ||
+ GETTYPE(op->type) == STORE_VSX) &&
+@@ -2982,6 +3043,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+
+ return 0;
+
++ unknown_opcode:
++ op->type = UNKNOWN;
++ return 0;
++
+ logical_done:
+ if (word & 1)
+ set_cr0(regs, op);
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 16e86ba8aa209..f6b7749d6ada7 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
+ #define NEXT_PROPERTY 3
+ #define PREV_PARENT 4
+ #define MORE_MEMORY 5
+-#define CALL_AGAIN -2
+ #define ERR_CFG_USE -9003
+
+ struct device_node *dlpar_configure_connector(__be32 drc_index,
+@@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
+
+ spin_unlock(&rtas_data_buf_lock);
+
++ if (rtas_busy_delay(rc))
++ continue;
++
+ switch (rc) {
+ case COMPLETE:
+ break;
+@@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
+ last_dn = last_dn->parent;
+ break;
+
+- case CALL_AGAIN:
+- break;
+-
+ case MORE_MEMORY:
+ case ERR_CFG_USE:
+ default:
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 0cfd6da784f84..71a315e73cbe7 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -32,9 +32,10 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+ # Disable -pg to prevent insert call site
+ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
+-# Disable gcov profiling for VDSO code
++# Disable profiling and instrumentation for VDSO code
+ GCOV_PROFILE := n
+ KCOV_INSTRUMENT := n
++KASAN_SANITIZE := n
+
+ # Force dependency
+ $(obj)/vdso.o: $(obj)/vdso.so
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index 5aaa2ca6a9286..978a35ea6081f 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -136,7 +136,8 @@ static int do_account_vtime(struct task_struct *tsk)
+ " stck %1" /* Store current tod clock value */
+ #endif
+ : "=Q" (S390_lowcore.last_update_timer),
+- "=Q" (S390_lowcore.last_update_clock));
++ "=Q" (S390_lowcore.last_update_clock)
++ : : "cc");
+ clock = S390_lowcore.last_update_clock - clock;
+ timer -= S390_lowcore.last_update_timer;
+
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index c9c34dc52b7d8..639dde28124a2 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -494,7 +494,7 @@ config COMPAT
+ bool
+ depends on SPARC64
+ default y
+- select COMPAT_BINFMT_ELF
++ select COMPAT_BINFMT_ELF if BINFMT_ELF
+ select HAVE_UID16
+ select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
+diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
+index bd48575172c32..3a66e62eb2a0e 100644
+--- a/arch/sparc/kernel/led.c
++++ b/arch/sparc/kernel/led.c
+@@ -50,6 +50,7 @@ static void led_blink(struct timer_list *unused)
+ add_timer(&led_blink_timer);
+ }
+
++#ifdef CONFIG_PROC_FS
+ static int led_proc_show(struct seq_file *m, void *v)
+ {
+ if (get_auxio() & AUXIO_LED)
+@@ -111,6 +112,7 @@ static const struct proc_ops led_proc_ops = {
+ .proc_release = single_release,
+ .proc_write = led_proc_write,
+ };
++#endif
+
+ static struct proc_dir_entry *led;
+
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
+index b89d42b29e344..f427f34b8b79b 100644
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -142,6 +142,7 @@ __bzero:
+ ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
+ ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
+ 13:
++ EXT(12b, 13b, 21f)
+ be 8f
+ andcc %o1, 4, %g0
+
+diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
+index 4337b4ced0954..e82e203f5f419 100644
+--- a/arch/um/include/shared/skas/mm_id.h
++++ b/arch/um/include/shared/skas/mm_id.h
+@@ -12,6 +12,7 @@ struct mm_id {
+ int pid;
+ } u;
+ unsigned long stack;
++ int kill;
+ };
+
+ #endif
+diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
+index 61776790cd678..5be1b0da9f3be 100644
+--- a/arch/um/kernel/tlb.c
++++ b/arch/um/kernel/tlb.c
+@@ -125,6 +125,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
+ struct host_vm_op *last;
+ int fd = -1, ret = 0;
+
++ if (virt + len > STUB_START && virt < STUB_END)
++ return -EINVAL;
++
+ if (hvc->userspace)
+ fd = phys_mapping(phys, &offset);
+ else
+@@ -162,7 +165,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
+ struct host_vm_op *last;
+ int ret = 0;
+
+- if ((addr >= STUB_START) && (addr < STUB_END))
++ if (addr + len > STUB_START && addr < STUB_END)
+ return -EINVAL;
+
+ if (hvc->index != 0) {
+@@ -192,6 +195,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
+ struct host_vm_op *last;
+ int ret = 0;
+
++ if (addr + len > STUB_START && addr < STUB_END)
++ return -EINVAL;
++
+ if (hvc->index != 0) {
+ last = &hvc->ops[hvc->index - 1];
+ if ((last->type == MPROTECT) &&
+@@ -346,12 +352,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
+
+ /* This is not an else because ret is modified above */
+ if (ret) {
++ struct mm_id *mm_idp = &current->mm->context.id;
++
+ printk(KERN_ERR "fix_range_common: failed, killing current "
+ "process: %d\n", task_tgid_vnr(current));
+- /* We are under mmap_lock, release it such that current can terminate */
+- mmap_write_unlock(current->mm);
+- force_sig(SIGKILL);
+- do_signal(&current->thread.regs);
++ mm_idp->kill = 1;
+ }
+ }
+
+@@ -472,6 +477,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+ struct mm_id *mm_id;
+
+ address &= PAGE_MASK;
++
++ if (address >= STUB_START && address < STUB_END)
++ goto kill;
++
+ pgd = pgd_offset(mm, address);
+ if (!pgd_present(*pgd))
+ goto kill;
+diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
+index 0621d521208e4..02c4741ade5e8 100644
+--- a/arch/um/os-Linux/skas/process.c
++++ b/arch/um/os-Linux/skas/process.c
+@@ -249,6 +249,7 @@ static int userspace_tramp(void *stack)
+ }
+
+ int userspace_pid[NR_CPUS];
++int kill_userspace_mm[NR_CPUS];
+
+ /**
+ * start_userspace() - prepare a new userspace process
+@@ -342,6 +343,8 @@ void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
+ interrupt_end();
+
+ while (1) {
++ if (kill_userspace_mm[0])
++ fatal_sigsegv();
+
+ /*
+ * This can legitimately fail if the process loads a
+@@ -663,4 +666,5 @@ void reboot_skas(void)
+ void __switch_mm(struct mm_id *mm_idp)
+ {
+ userspace_pid[0] = mm_idp->u.pid;
++ kill_userspace_mm[0] = mm_idp->kill;
+ }
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index ad8a7188a2bf7..f9a1d98e75349 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -686,7 +686,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
+- struct gcm_context_data data AESNI_ALIGN_ATTR;
++ u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
++ struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
+ struct scatter_walk dst_sg_walk = {};
+ unsigned long left = req->cryptlen;
+ unsigned long len, srclen, dstlen;
+@@ -735,8 +736,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ }
+
+ kernel_fpu_begin();
+- gcm_tfm->init(aes_ctx, &data, iv,
+- hash_subkey, assoc, assoclen);
++ gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
+ if (req->src != req->dst) {
+ while (left) {
+ src = scatterwalk_map(&src_sg_walk);
+@@ -746,10 +746,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ len = min(srclen, dstlen);
+ if (len) {
+ if (enc)
+- gcm_tfm->enc_update(aes_ctx, &data,
++ gcm_tfm->enc_update(aes_ctx, data,
+ dst, src, len);
+ else
+- gcm_tfm->dec_update(aes_ctx, &data,
++ gcm_tfm->dec_update(aes_ctx, data,
+ dst, src, len);
+ }
+ left -= len;
+@@ -767,10 +767,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ len = scatterwalk_clamp(&src_sg_walk, left);
+ if (len) {
+ if (enc)
+- gcm_tfm->enc_update(aes_ctx, &data,
++ gcm_tfm->enc_update(aes_ctx, data,
+ src, src, len);
+ else
+- gcm_tfm->dec_update(aes_ctx, &data,
++ gcm_tfm->dec_update(aes_ctx, data,
+ src, src, len);
+ }
+ left -= len;
+@@ -779,7 +779,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ scatterwalk_done(&src_sg_walk, 1, left);
+ }
+ }
+- gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
++ gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
+ kernel_fpu_end();
+
+ if (!assocmem)
+@@ -828,7 +828,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ unsigned int i;
+ __be32 counter = cpu_to_be32(1);
+
+@@ -855,7 +856,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ unsigned int i;
+
+ if (unlikely(req->assoclen != 16 && req->assoclen != 20))
+@@ -985,7 +987,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ __be32 counter = cpu_to_be32(1);
+
+ memcpy(iv, req->iv, 12);
+@@ -1001,7 +1004,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+
+ memcpy(iv, req->iv, 12);
+ *((__be32 *)(iv+12)) = counter;
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
+index 0904f5676e4d8..f89ae8ada64fe 100644
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -270,7 +270,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+
+ instrumentation_begin();
+ run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+- instrumentation_begin();
++ instrumentation_end();
+
+ set_irq_regs(old_regs);
+
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 9aad0e0876fba..fda3e7747c223 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
+ }
+
+
+-/** Disable VMX on the current CPU
++/**
++ * cpu_vmxoff() - Disable VMX on the current CPU
+ *
+- * vmxoff causes a undefined-opcode exception if vmxon was not run
+- * on the CPU previously. Only call this function if you know VMX
+- * is enabled.
++ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
++ *
++ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
++ * atomically track post-VMXON state, e.g. this may be called in NMI context.
++ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
++ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
++ * magically in RM, VM86, compat mode, or at CPL>0.
+ */
+ static inline void cpu_vmxoff(void)
+ {
+- asm volatile ("vmxoff");
++ asm_volatile_goto("1: vmxoff\n\t"
++ _ASM_EXTABLE(1b, %l[fault]) :::: fault);
++fault:
+ cr4_clear_bits(X86_CR4_VMXE);
+ }
+
+diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
+index c519fc5f69480..8df81a3ed9457 100644
+--- a/arch/x86/kernel/cpu/sgx/main.c
++++ b/arch/x86/kernel/cpu/sgx/main.c
+@@ -700,25 +700,27 @@ static bool __init sgx_page_cache_init(void)
+ return true;
+ }
+
+-static void __init sgx_init(void)
++static int __init sgx_init(void)
+ {
+ int ret;
+ int i;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SGX))
+- return;
++ return -ENODEV;
+
+ if (!sgx_page_cache_init())
+- return;
++ return -ENOMEM;
+
+- if (!sgx_page_reclaimer_init())
++ if (!sgx_page_reclaimer_init()) {
++ ret = -ENOMEM;
+ goto err_page_cache;
++ }
+
+ ret = sgx_drv_init();
+ if (ret)
+ goto err_kthread;
+
+- return;
++ return 0;
+
+ err_kthread:
+ kthread_stop(ksgxd_tsk);
+@@ -728,6 +730,8 @@ err_page_cache:
+ vfree(sgx_epc_sections[i].pages);
+ memunmap(sgx_epc_sections[i].virt_addr);
+ }
++
++ return ret;
+ }
+
+ device_initcall(sgx_init);
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 8a67d1fa8dc58..ed8ac6bcbafb2 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -182,6 +182,13 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
+ err = security_locked_down(LOCKDOWN_MSR);
+ if (err)
+ break;
++
++ err = filter_write(regs[1]);
++ if (err)
++ return err;
++
++ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
++
+ err = wrmsr_safe_regs_on_cpu(cpu, regs);
+ if (err)
+ break;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index db115943e8bdc..efbaef8b4de98 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -538,31 +538,21 @@ static void emergency_vmx_disable_all(void)
+ local_irq_disable();
+
+ /*
+- * We need to disable VMX on all CPUs before rebooting, otherwise
+- * we risk hanging up the machine, because the CPU ignores INIT
+- * signals when VMX is enabled.
++ * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
++ * the machine, because the CPU blocks INIT when it's in VMX root.
+ *
+- * We can't take any locks and we may be on an inconsistent
+- * state, so we use NMIs as IPIs to tell the other CPUs to disable
+- * VMX and halt.
++ * We can't take any locks and we may be on an inconsistent state, so
++ * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
+ *
+- * For safety, we will avoid running the nmi_shootdown_cpus()
+- * stuff unnecessarily, but we don't have a way to check
+- * if other CPUs have VMX enabled. So we will call it only if the
+- * CPU we are running on has VMX enabled.
+- *
+- * We will miss cases where VMX is not enabled on all CPUs. This
+- * shouldn't do much harm because KVM always enable VMX on all
+- * CPUs anyway. But we can miss it on the small window where KVM
+- * is still enabling VMX.
++ * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
++ * doesn't prevent a different CPU from being in VMX root operation.
+ */
+- if (cpu_has_vmx() && cpu_vmx_enabled()) {
+- /* Disable VMX on this CPU. */
+- cpu_vmxoff();
++ if (cpu_has_vmx()) {
++ /* Safely force _this_ CPU out of VMX root operation. */
++ __cpu_emergency_vmxoff();
+
+- /* Halt and disable VMX on the other CPUs */
++ /* Halt and exit VMX root operation on the other CPUs. */
+ nmi_shootdown_cpus(vmxoff_nmi);
+-
+ }
+ }
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 66a08322988f2..1453b9b794425 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2564,12 +2564,12 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
+ ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
+ ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
+
+- val = GET_SMSTATE(u32, smstate, 0x7f68);
++ val = GET_SMSTATE(u64, smstate, 0x7f68);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
+- val = GET_SMSTATE(u32, smstate, 0x7f60);
++ val = GET_SMSTATE(u64, smstate, 0x7f60);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index b56d604809b8a..17976998bffbc 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1067,7 +1067,8 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
+
+ pfn = spte_to_pfn(iter.old_spte);
+ if (kvm_is_reserved_pfn(pfn) ||
+- !PageTransCompoundMap(pfn_to_page(pfn)))
++ (!PageCompound(pfn_to_page(pfn)) &&
++ !kvm_is_zone_device_pfn(pfn)))
+ continue;
+
+ tdp_mmu_set_spte(kvm, &iter, 0);
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index db30670dd8c4a..d36773c7b5359 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -51,6 +51,23 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+ nested_svm_vmexit(svm);
+ }
+
++static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
++{
++ struct vcpu_svm *svm = to_svm(vcpu);
++ WARN_ON(!is_guest_mode(vcpu));
++
++ if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
++ !svm->nested.nested_run_pending) {
++ svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
++ svm->vmcb->control.exit_code_hi = 0;
++ svm->vmcb->control.exit_info_1 = fault->error_code;
++ svm->vmcb->control.exit_info_2 = fault->address;
++ nested_svm_vmexit(svm);
++ } else {
++ kvm_inject_page_fault(vcpu, fault);
++ }
++}
++
+ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+@@ -58,7 +75,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+ u64 pdpte;
+ int ret;
+
+- ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
++ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
+ offset_in_page(cr3) + index * 8, 8);
+ if (ret)
+ return 0;
+@@ -446,6 +463,9 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+ if (ret)
+ return ret;
+
++ if (!npt_enabled)
++ svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
++
+ svm_set_gif(svm, true);
+
+ return 0;
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 3442d44ca53b8..825ef6d281c98 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1105,12 +1105,12 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ static void svm_check_invpcid(struct vcpu_svm *svm)
+ {
+ /*
+- * Intercept INVPCID instruction only if shadow page table is
+- * enabled. Interception is not required with nested page table
+- * enabled.
++ * Intercept INVPCID if shadow paging is enabled to sync/free shadow
++ * roots, or if INVPCID is disabled in the guest to inject #UD.
+ */
+ if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
+- if (!npt_enabled)
++ if (!npt_enabled ||
++ !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
+ svm_set_intercept(svm, INTERCEPT_INVPCID);
+ else
+ svm_clr_intercept(svm, INTERCEPT_INVPCID);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1b404e4d7dd8e..b967c1c774a1f 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1782,6 +1782,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
+
+ bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+ {
++ xfer_to_guest_mode_prepare();
+ return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
+ xfer_to_guest_mode_work_pending();
+ }
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index f1f1b5a0956a0..441c3e9b89719 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -54,7 +54,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
+ * 32-bit mode:
+ *
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+- * Check that here and ignore it.
++ * Check that here and ignore it. This is AMD erratum #91.
+ *
+ * 64-bit mode:
+ *
+@@ -83,11 +83,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+ #ifdef CONFIG_X86_64
+ case 0x40:
+ /*
+- * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+- * Need to figure out under what instruction mode the
+- * instruction was issued. Could check the LDT for lm,
+- * but for now it's good enough to assume that long
+- * mode only uses well known segments or kernel.
++ * In 64-bit mode 0x40..0x4F are valid REX prefixes
+ */
+ return (!user_mode(regs) || user_64bit_mode(regs));
+ #endif
+@@ -127,20 +123,31 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+ instr = (void *)convert_ip_to_linear(current, regs);
+ max_instr = instr + 15;
+
+- if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
+- return 0;
++ /*
++ * This code has historically always bailed out if IP points to a
++ * not-present page (e.g. due to a race). No one has ever
++ * complained about this.
++ */
++ pagefault_disable();
+
+ while (instr < max_instr) {
+ unsigned char opcode;
+
+- if (get_kernel_nofault(opcode, instr))
+- break;
++ if (user_mode(regs)) {
++ if (get_user(opcode, instr))
++ break;
++ } else {
++ if (get_kernel_nofault(opcode, instr))
++ break;
++ }
+
+ instr++;
+
+ if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
+ break;
+ }
++
++ pagefault_enable();
+ return prefetch;
+ }
+
+diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
+index 8f665c352bf0d..ca311aaa67b88 100644
+--- a/arch/x86/mm/pat/memtype.c
++++ b/arch/x86/mm/pat/memtype.c
+@@ -1164,12 +1164,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
+
+ static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++ kfree(v);
+ ++*pos;
+ return memtype_get_idx(*pos);
+ }
+
+ static void memtype_seq_stop(struct seq_file *seq, void *v)
+ {
++ kfree(v);
+ }
+
+ static int memtype_seq_show(struct seq_file *seq, void *v)
+@@ -1181,8 +1183,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
+ entry_print->end,
+ cattr_name(entry_print->type));
+
+- kfree(entry_print);
+-
+ return 0;
+ }
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 9e81d1052091f..5720978e4d09b 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2937,6 +2937,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ }
+
+ bfqd->in_service_queue = bfqq;
++ bfqd->in_serv_last_pos = 0;
+ }
+
+ /*
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 43990b1d148b8..89447d32d9ea5 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -481,6 +481,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
+ }
+ EXPORT_SYMBOL(blk_queue_io_opt);
+
++static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
++{
++ sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
++ if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
++ sectors = PAGE_SIZE >> SECTOR_SHIFT;
++ return sectors;
++}
++
+ /**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top device)
+@@ -607,6 +615,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ ret = -1;
+ }
+
++ t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
++ t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
++ t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
++
+ /* Discard alignment and granularity */
+ if (b->discard_granularity) {
+ alignment = queue_limit_discard_alignment(b, start);
+diff --git a/block/bsg.c b/block/bsg.c
+index d7bae94b64d95..3d78e843a83f6 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
+ return PTR_ERR(rq);
+
+ ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
+- if (ret)
++ if (ret) {
++ blk_put_request(rq);
+ return ret;
++ }
+
+ rq->timeout = msecs_to_jiffies(hdr.timeout);
+ if (!rq->timeout)
+diff --git a/block/genhd.c b/block/genhd.c
+index 9e741a4f351be..07a0ef741de19 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -74,7 +74,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
+ return false;
+
+ pr_info("%s: detected capacity change from %lld to %lld\n",
+- disk->disk_name, size, capacity);
++ disk->disk_name, capacity, size);
+
+ /*
+ * Historically we did not send a uevent for changes to/from an empty
+diff --git a/block/ioctl.c b/block/ioctl.c
+index d61d652078f41..ff241e663c018 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -81,20 +81,27 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
+ }
+ #endif
+
+-static int blkdev_reread_part(struct block_device *bdev)
++static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
+ {
+- int ret;
++ struct block_device *tmp;
+
+ if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+- mutex_lock(&bdev->bd_mutex);
+- ret = bdev_disk_changed(bdev, false);
+- mutex_unlock(&bdev->bd_mutex);
++ /*
++ * Reopen the device to revalidate the driver state and force a
++ * partition rescan.
++ */
++ mode &= ~FMODE_EXCL;
++ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+
+- return ret;
++ tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
++ if (IS_ERR(tmp))
++ return PTR_ERR(tmp);
++ blkdev_put(tmp, mode);
++ return 0;
+ }
+
+ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+@@ -498,7 +505,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
+ bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+ return 0;
+ case BLKRRPART:
+- return blkdev_reread_part(bdev);
++ return blkdev_reread_part(bdev, mode);
+ case BLKTRACESTART:
+ case BLKTRACESTOP:
+ case BLKTRACETEARDOWN:
+diff --git a/certs/blacklist.c b/certs/blacklist.c
+index 6514f9ebc943f..f1c434b04b5e4 100644
+--- a/certs/blacklist.c
++++ b/certs/blacklist.c
+@@ -162,7 +162,7 @@ static int __init blacklist_init(void)
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA |
+- KEY_FLAG_KEEP,
++ KEY_ALLOC_SET_KEEP,
+ NULL, NULL);
+ if (IS_ERR(blacklist_keyring))
+ panic("Can't allocate system blacklist keyring\n");
+diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
+index 66fcb2ea81544..fca63b559f655 100644
+--- a/crypto/ecdh_helper.c
++++ b/crypto/ecdh_helper.c
+@@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
+ if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
+ return -EINVAL;
+
++ if (unlikely(len < secret.len))
++ return -EINVAL;
++
+ ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
+ ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
+ if (secret.len != crypto_ecdh_key_len(params))
+diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
+index 63350c4ad4617..f4c31049601c9 100644
+--- a/crypto/michael_mic.c
++++ b/crypto/michael_mic.c
+@@ -7,7 +7,7 @@
+ * Copyright (c) 2004 Jouni Malinen <j@w1.fi>
+ */
+ #include <crypto/internal/hash.h>
+-#include <asm/byteorder.h>
++#include <asm/unaligned.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+@@ -19,7 +19,7 @@ struct michael_mic_ctx {
+ };
+
+ struct michael_mic_desc_ctx {
+- u8 pending[4];
++ __le32 pending;
+ size_t pending_len;
+
+ u32 l, r;
+@@ -60,13 +60,12 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+ {
+ struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
+- const __le32 *src;
+
+ if (mctx->pending_len) {
+ int flen = 4 - mctx->pending_len;
+ if (flen > len)
+ flen = len;
+- memcpy(&mctx->pending[mctx->pending_len], data, flen);
++ memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen);
+ mctx->pending_len += flen;
+ data += flen;
+ len -= flen;
+@@ -74,23 +73,21 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
+ if (mctx->pending_len < 4)
+ return 0;
+
+- src = (const __le32 *)mctx->pending;
+- mctx->l ^= le32_to_cpup(src);
++ mctx->l ^= le32_to_cpu(mctx->pending);
+ michael_block(mctx->l, mctx->r);
+ mctx->pending_len = 0;
+ }
+
+- src = (const __le32 *)data;
+-
+ while (len >= 4) {
+- mctx->l ^= le32_to_cpup(src++);
++ mctx->l ^= get_unaligned_le32(data);
+ michael_block(mctx->l, mctx->r);
++ data += 4;
+ len -= 4;
+ }
+
+ if (len > 0) {
+ mctx->pending_len = len;
+- memcpy(mctx->pending, src, len);
++ memcpy(&mctx->pending, data, len);
+ }
+
+ return 0;
+@@ -100,8 +97,7 @@ static int michael_update(struct shash_desc *desc, const u8 *data,
+ static int michael_final(struct shash_desc *desc, u8 *out)
+ {
+ struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
+- u8 *data = mctx->pending;
+- __le32 *dst = (__le32 *)out;
++ u8 *data = (u8 *)&mctx->pending;
+
+ /* Last block and padding (0x5a, 4..7 x 0) */
+ switch (mctx->pending_len) {
+@@ -123,8 +119,8 @@ static int michael_final(struct shash_desc *desc, u8 *out)
+ /* l ^= 0; */
+ michael_block(mctx->l, mctx->r);
+
+- dst[0] = cpu_to_le32(mctx->l);
+- dst[1] = cpu_to_le32(mctx->r);
++ put_unaligned_le32(mctx->l, out);
++ put_unaligned_le32(mctx->r, out + 4);
+
+ return 0;
+ }
+@@ -135,13 +131,11 @@ static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
+ {
+ struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);
+
+- const __le32 *data = (const __le32 *)key;
+-
+ if (keylen != 8)
+ return -EINVAL;
+
+- mctx->l = le32_to_cpu(data[0]);
+- mctx->r = le32_to_cpu(data[1]);
++ mctx->l = get_unaligned_le32(key);
++ mctx->r = get_unaligned_le32(key + 4);
+ return 0;
+ }
+
+@@ -156,7 +150,6 @@ static struct shash_alg alg = {
+ .cra_name = "michael_mic",
+ .cra_driver_name = "michael_mic-generic",
+ .cra_blocksize = 8,
+- .cra_alignmask = 3,
+ .cra_ctxsize = sizeof(struct michael_mic_ctx),
+ .cra_module = THIS_MODULE,
+ }
+diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
+index cf91f49101eac..3a14859dbb757 100644
+--- a/drivers/acpi/acpi_configfs.c
++++ b/drivers/acpi/acpi_configfs.c
+@@ -268,7 +268,12 @@ static int __init acpi_configfs_init(void)
+
+ acpi_table_group = configfs_register_default_group(root, "table",
+ &acpi_tables_type);
+- return PTR_ERR_OR_ZERO(acpi_table_group);
++ if (IS_ERR(acpi_table_group)) {
++ configfs_unregister_subsystem(&acpi_configfs);
++ return PTR_ERR(acpi_table_group);
++ }
++
++ return 0;
+ }
+ module_init(acpi_configfs_init);
+
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 24e87b6305731..16b28084c1ca6 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -787,9 +787,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
+ const union acpi_object *obj;
+ int ret;
+
+- if (!val)
+- return -EINVAL;
+-
+ if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+@@ -799,28 +796,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
+ case DEV_PROP_U8:
+ if (obj->integer.value > U8_MAX)
+ return -EOVERFLOW;
+- *(u8 *)val = obj->integer.value;
++
++ if (val)
++ *(u8 *)val = obj->integer.value;
++
+ break;
+ case DEV_PROP_U16:
+ if (obj->integer.value > U16_MAX)
+ return -EOVERFLOW;
+- *(u16 *)val = obj->integer.value;
++
++ if (val)
++ *(u16 *)val = obj->integer.value;
++
+ break;
+ case DEV_PROP_U32:
+ if (obj->integer.value > U32_MAX)
+ return -EOVERFLOW;
+- *(u32 *)val = obj->integer.value;
++
++ if (val)
++ *(u32 *)val = obj->integer.value;
++
+ break;
+ default:
+- *(u64 *)val = obj->integer.value;
++ if (val)
++ *(u64 *)val = obj->integer.value;
++
+ break;
+ }
++
++ if (!val)
++ return 1;
+ } else if (proptype == DEV_PROP_STRING) {
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
+ if (ret)
+ return ret;
+
+- *(char **)val = obj->string.pointer;
++ if (val)
++ *(char **)val = obj->string.pointer;
+
+ return 1;
+ } else {
+@@ -834,7 +846,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
+ {
+ int ret;
+
+- if (!adev)
++ if (!adev || !val)
+ return -EINVAL;
+
+ ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
+@@ -928,10 +940,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ const union acpi_object *items;
+ int ret;
+
+- if (val && nval == 1) {
++ if (nval == 1 || !val) {
+ ret = acpi_data_prop_read_single(data, propname, proptype, val);
+- if (ret >= 0)
++ /*
++ * The overflow error means that the property is there and it is
++ * single-value, but its type does not match, so return.
++ */
++ if (ret >= 0 || ret == -EOVERFLOW)
+ return ret;
++
++ /*
++ * Reading this property as a single-value one failed, but its
++ * value may still be represented as one-element array, so
++ * continue.
++ */
+ }
+
+ ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index ecc304149067c..b5f5ca4e3f343 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -299,10 +299,11 @@ static int amba_remove(struct device *dev)
+ {
+ struct amba_device *pcdev = to_amba_device(dev);
+ struct amba_driver *drv = to_amba_driver(dev->driver);
+- int ret;
++ int ret = 0;
+
+ pm_runtime_get_sync(dev);
+- ret = drv->remove(pcdev);
++ if (drv->remove)
++ ret = drv->remove(pcdev);
+ pm_runtime_put_noidle(dev);
+
+ /* Undo the runtime PM settings in amba_probe() */
+@@ -319,7 +320,9 @@ static int amba_remove(struct device *dev)
+ static void amba_shutdown(struct device *dev)
+ {
+ struct amba_driver *drv = to_amba_driver(dev->driver);
+- drv->shutdown(to_amba_device(dev));
++
++ if (drv->shutdown)
++ drv->shutdown(to_amba_device(dev));
+ }
+
+ /**
+@@ -332,12 +335,13 @@ static void amba_shutdown(struct device *dev)
+ */
+ int amba_driver_register(struct amba_driver *drv)
+ {
+- drv->drv.bus = &amba_bustype;
++ if (!drv->probe)
++ return -EINVAL;
+
+-#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
+- SETFN(probe);
+- SETFN(remove);
+- SETFN(shutdown);
++ drv->drv.bus = &amba_bustype;
++ drv->drv.probe = amba_probe;
++ drv->drv.remove = amba_remove;
++ drv->drv.shutdown = amba_shutdown;
+
+ return driver_register(&drv->drv);
+ }
+diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
+index 49f7acbfcf01e..5b32df5d33adc 100644
+--- a/drivers/ata/ahci_brcm.c
++++ b/drivers/ata/ahci_brcm.c
+@@ -377,6 +377,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
+ if (ret)
+ return ret;
+
++ ret = ahci_platform_enable_regulators(hpriv);
++ if (ret)
++ goto out_disable_clks;
++
+ brcm_sata_init(priv);
+ brcm_sata_phys_enable(priv);
+ brcm_sata_alpm_init(hpriv);
+@@ -406,6 +410,8 @@ out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+ out_disable_phys:
+ brcm_sata_phys_disable(priv);
++ ahci_platform_disable_regulators(hpriv);
++out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+ return ret;
+ }
+@@ -490,6 +496,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ if (ret)
+ goto out_reset;
+
++ ret = ahci_platform_enable_regulators(hpriv);
++ if (ret)
++ goto out_disable_clks;
++
+ /* Must be first so as to configure endianness including that
+ * of the standard AHCI register space.
+ */
+@@ -499,7 +509,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ if (!priv->port_mask) {
+ ret = -ENODEV;
+- goto out_disable_clks;
++ goto out_disable_regulators;
+ }
+
+ /* Must be done before ahci_platform_enable_phys() */
+@@ -524,6 +534,8 @@ out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+ out_disable_phys:
+ brcm_sata_phys_disable(priv);
++out_disable_regulators:
++ ahci_platform_disable_regulators(hpriv);
+ out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+ out_reset:
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
+index a2b59b84bb881..1509cb74705a3 100644
+--- a/drivers/auxdisplay/Kconfig
++++ b/drivers/auxdisplay/Kconfig
+@@ -507,6 +507,3 @@ config PANEL
+ depends on PARPORT
+ select AUXDISPLAY
+ select PARPORT_PANEL
+-
+-config CHARLCD
+- tristate "Character LCD core support" if COMPILE_TEST
+diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
+index d951d54b26f52..d8602843e8a53 100644
+--- a/drivers/auxdisplay/ht16k33.c
++++ b/drivers/auxdisplay/ht16k33.c
+@@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+ {
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+- schedule_delayed_work(&fbdev->work,
+- msecs_to_jiffies(HZ / fbdev->refresh_rate));
++ schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
+ }
+
+ /*
+diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
+index 8336535f1e110..d8b314e7d0fdc 100644
+--- a/drivers/base/auxiliary.c
++++ b/drivers/base/auxiliary.c
+@@ -15,6 +15,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/string.h>
+ #include <linux/auxiliary_bus.h>
++#include "base.h"
+
+ static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
+ const struct auxiliary_device *auxdev)
+@@ -260,19 +261,11 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
+ }
+ EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+
+-static int __init auxiliary_bus_init(void)
++void __init auxiliary_bus_init(void)
+ {
+- return bus_register(&auxiliary_bus_type);
++ WARN_ON(bus_register(&auxiliary_bus_type));
+ }
+
+-static void __exit auxiliary_bus_exit(void)
+-{
+- bus_unregister(&auxiliary_bus_type);
+-}
+-
+-module_init(auxiliary_bus_init);
+-module_exit(auxiliary_bus_exit);
+-
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("Auxiliary Bus");
+ MODULE_AUTHOR("David Ertman ");
+diff --git a/drivers/base/base.h b/drivers/base/base.h
+index f5600a83124fa..52b3d7b75c275 100644
+--- a/drivers/base/base.h
++++ b/drivers/base/base.h
+@@ -119,6 +119,11 @@ static inline int hypervisor_init(void) { return 0; }
+ extern int platform_bus_init(void);
+ extern void cpu_dev_init(void);
+ extern void container_dev_init(void);
++#ifdef CONFIG_AUXILIARY_BUS
++extern void auxiliary_bus_init(void);
++#else
++static inline void auxiliary_bus_init(void) { }
++#endif
+
+ struct kobject *virtual_device_parent(struct device *dev);
+
+diff --git a/drivers/base/init.c b/drivers/base/init.c
+index 908e6520e804b..a9f57c22fb9e2 100644
+--- a/drivers/base/init.c
++++ b/drivers/base/init.c
+@@ -32,6 +32,7 @@ void __init driver_init(void)
+ */
+ of_core_init();
+ platform_bus_init();
++ auxiliary_bus_init();
+ cpu_dev_init();
+ memory_dev_init();
+ container_dev_init();
+diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
+index c83be26434e76..966de8a136d90 100644
+--- a/drivers/base/regmap/regmap-sdw.c
++++ b/drivers/base/regmap/regmap-sdw.c
+@@ -13,7 +13,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+- return sdw_write(slave, reg, val);
++ return sdw_write_no_pm(slave, reg, val);
+ }
+
+ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+@@ -22,7 +22,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+- read = sdw_read(slave, reg);
++ read = sdw_read_no_pm(slave, reg);
+ if (read < 0)
+ return read;
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 4a4b2008fbc26..4fcc1a6fb724c 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -443,14 +443,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
+ struct swnode *c = to_swnode(child);
+
+ if (!p || list_empty(&p->children) ||
+- (c && list_is_last(&c->entry, &p->children)))
++ (c && list_is_last(&c->entry, &p->children))) {
++ fwnode_handle_put(child);
+ return NULL;
++ }
+
+ if (c)
+ c = list_next_entry(c, entry);
+ else
+ c = list_first_entry(&p->children, struct swnode, entry);
+- return &c->fwnode;
++
++ fwnode_handle_put(child);
++ return fwnode_handle_get(&c->fwnode);
+ }
+
+ static struct fwnode_handle *
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index dfe1dfc901ccc..0b71292d9d5ab 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4121,23 +4121,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ if (fdc_state[FDC(drive)].rawcmd == 1)
+ fdc_state[FDC(drive)].rawcmd = 2;
+
+- if (!(mode & FMODE_NDELAY)) {
+- if (mode & (FMODE_READ|FMODE_WRITE)) {
+- drive_state[drive].last_checked = 0;
+- clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
+- &drive_state[drive].flags);
+- if (bdev_check_media_change(bdev))
+- floppy_revalidate(bdev->bd_disk);
+- if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
+- goto out;
+- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+- goto out;
+- }
+- res = -EROFS;
+- if ((mode & FMODE_WRITE) &&
+- !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
++ if (mode & (FMODE_READ|FMODE_WRITE)) {
++ drive_state[drive].last_checked = 0;
++ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
++ if (bdev_check_media_change(bdev))
++ floppy_revalidate(bdev->bd_disk);
++ if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
++ goto out;
++ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
+ goto out;
+ }
++
++ res = -EROFS;
++
++ if ((mode & FMODE_WRITE) &&
++ !test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
++ goto out;
++
+ mutex_unlock(&open_lock);
+ mutex_unlock(&floppy_mutex);
+ return 0;
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index e2933cb7a82a3..3279969fc99cb 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1082,7 +1082,7 @@ static ssize_t mm_stat_show(struct device *dev,
+ zram->limit_pages << PAGE_SHIFT,
+ max_used << PAGE_SHIFT,
+ (u64)atomic64_read(&zram->stats.same_pages),
+- pool_stats.pages_compacted,
++ atomic_long_read(&pool_stats.pages_compacted),
+ (u64)atomic64_read(&zram->stats.huge_pages),
+ (u64)atomic64_read(&zram->stats.huge_pages_since));
+ up_read(&zram->init_lock);
+diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
+index 98d53764871f5..2acb719e596f5 100644
+--- a/drivers/bluetooth/btqcomsmd.c
++++ b/drivers/bluetooth/btqcomsmd.c
+@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+
+ btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
+ btqcomsmd_cmd_callback, btq);
+- if (IS_ERR(btq->cmd_channel))
+- return PTR_ERR(btq->cmd_channel);
++ if (IS_ERR(btq->cmd_channel)) {
++ ret = PTR_ERR(btq->cmd_channel);
++ goto destroy_acl_channel;
++ }
+
+ hdev = hci_alloc_dev();
+- if (!hdev)
+- return -ENOMEM;
++ if (!hdev) {
++ ret = -ENOMEM;
++ goto destroy_cmd_channel;
++ }
+
+ hci_set_drvdata(hdev, btq);
+ btq->hdev = hdev;
+@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ ret = hci_register_dev(hdev);
+- if (ret < 0) {
+- hci_free_dev(hdev);
+- return ret;
+- }
++ if (ret < 0)
++ goto hci_free_dev;
+
+ platform_set_drvdata(pdev, btq);
+
+ return 0;
++
++hci_free_dev:
++ hci_free_dev(hdev);
++destroy_cmd_channel:
++ rpmsg_destroy_ept(btq->cmd_channel);
++destroy_acl_channel:
++ rpmsg_destroy_ept(btq->acl_channel);
++
++ return ret;
+ }
+
+ static int btqcomsmd_remove(struct platform_device *pdev)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index da57c561642c4..a4f834a50a988 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3195,7 +3195,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+- goto err_out;
++ return;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+@@ -3213,13 +3213,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ */
+ if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
+- if (!data->evt_skb)
+- goto err_out;
++ if (!data->evt_skb) {
++ kfree_skb(skb);
++ return;
++ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+- if (err < 0)
+- goto err_free_skb;
++ if (err < 0) {
++ kfree_skb(data->evt_skb);
++ data->evt_skb = NULL;
++ return;
++ }
+
+ if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
+ &data->flags)) {
+@@ -3228,11 +3233,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ wake_up_bit(&data->flags,
+ BTUSB_TX_WAIT_VND_EVT);
+ }
+-err_out:
+- return;
+-err_free_skb:
+- kfree_skb(data->evt_skb);
+- data->evt_skb = NULL;
+ return;
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failed when usb_kill_urb */
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index f83d67eafc9f0..637c5b8c2aa1a 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ goto no_schedule;
+
+- if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+- set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
++ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
++ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
+ goto no_schedule;
+- }
+
+ BT_DBG("");
+
+@@ -174,10 +173,10 @@ restart:
+ kfree_skb(skb);
+ }
+
++ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
+ goto restart;
+
+- clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
+ }
+
+@@ -802,7 +801,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+ * We don't provide read/write/poll interface for user space.
+ */
+ static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t nr)
++ unsigned char *buf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ return 0;
+ }
+@@ -819,29 +819,28 @@ static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
+ return 0;
+ }
+
++static struct tty_ldisc_ops hci_uart_ldisc = {
++ .owner = THIS_MODULE,
++ .magic = TTY_LDISC_MAGIC,
++ .name = "n_hci",
++ .open = hci_uart_tty_open,
++ .close = hci_uart_tty_close,
++ .read = hci_uart_tty_read,
++ .write = hci_uart_tty_write,
++ .ioctl = hci_uart_tty_ioctl,
++ .compat_ioctl = hci_uart_tty_ioctl,
++ .poll = hci_uart_tty_poll,
++ .receive_buf = hci_uart_tty_receive,
++ .write_wakeup = hci_uart_tty_wakeup,
++};
++
+ static int __init hci_uart_init(void)
+ {
+- static struct tty_ldisc_ops hci_uart_ldisc;
+ int err;
+
+ BT_INFO("HCI UART driver ver %s", VERSION);
+
+ /* Register the tty discipline */
+-
+- memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
+- hci_uart_ldisc.magic = TTY_LDISC_MAGIC;
+- hci_uart_ldisc.name = "n_hci";
+- hci_uart_ldisc.open = hci_uart_tty_open;
+- hci_uart_ldisc.close = hci_uart_tty_close;
+- hci_uart_ldisc.read = hci_uart_tty_read;
+- hci_uart_ldisc.write = hci_uart_tty_write;
+- hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
+- hci_uart_ldisc.compat_ioctl = hci_uart_tty_ioctl;
+- hci_uart_ldisc.poll = hci_uart_tty_poll;
+- hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
+- hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
+- hci_uart_ldisc.owner = THIS_MODULE;
+-
+ err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
+ if (err) {
+ BT_ERR("HCI line discipline registration failed. (%d)", err);
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 4a963682c7021..de36af63e1825 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -50,7 +50,8 @@
+ #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
+ #define CMD_TRANS_TIMEOUT_MS 100
+ #define MEMDUMP_TIMEOUT_MS 8000
+-#define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000)
++#define IBS_DISABLE_SSR_TIMEOUT_MS \
++ (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
+ #define FW_DOWNLOAD_TIMEOUT_MS 3000
+
+ /* susclk rate */
+@@ -76,7 +77,8 @@ enum qca_flags {
+ QCA_MEMDUMP_COLLECTION,
+ QCA_HW_ERROR_EVENT,
+ QCA_SSR_TRIGGERED,
+- QCA_BT_OFF
++ QCA_BT_OFF,
++ QCA_ROM_FW
+ };
+
+ enum qca_capabilities {
+@@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work)
+ dump_size = __le32_to_cpu(dump->dump_size);
+ if (!(dump_size)) {
+ bt_dev_err(hu->hdev, "Rx invalid memdump size");
++ kfree(qca_memdump);
+ kfree_skb(skb);
++ qca->qca_memdump = NULL;
+ mutex_unlock(&qca->hci_memdump_lock);
+ return;
+ }
+@@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu)
+ if (ret)
+ return ret;
+
++ clear_bit(QCA_ROM_FW, &qca->flags);
+ /* Patch downloading has to be done without IBS mode */
+ set_bit(QCA_IBS_DISABLED, &qca->flags);
+
+@@ -1718,12 +1723,14 @@ retry:
+ hu->hdev->cmd_timeout = qca_cmd_timeout;
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
++ set_bit(QCA_ROM_FW, &qca->flags);
+ ret = 0;
+ } else if (ret == -EAGAIN) {
+ /*
+ * Userspace firmware loader will return -EAGAIN in case no
+ * patch/nvm-config is found, so run with original fw/config.
+ */
++ set_bit(QCA_ROM_FW, &qca->flags);
+ ret = 0;
+ }
+
+@@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev)
+
+ set_bit(QCA_SUSPENDING, &qca->flags);
+
+- if (test_bit(QCA_BT_OFF, &qca->flags))
++ /* if BT SoC is running with default firmware then it does not
++ * support in-band sleep
++ */
++ if (test_bit(QCA_ROM_FW, &qca->flags))
++ return 0;
++
++ /* During SSR after memory dump collection, controller will be
++ * powered off and then powered on.If controller is powered off
++ * during SSR then we should wait until SSR is completed.
++ */
++ if (test_bit(QCA_BT_OFF, &qca->flags) &&
++ !test_bit(QCA_SSR_TRIGGERED, &qca->flags))
+ return 0;
+
+- if (test_bit(QCA_IBS_DISABLED, &qca->flags)) {
++ if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
++ test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
+ wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
+ IBS_DISABLE_SSR_TIMEOUT_MS :
+ FW_DOWNLOAD_TIMEOUT_MS;
+
+ /* QCA_IBS_DISABLED flag is set to true, During FW download
+ * and during memory dump collection. It is reset to false,
+- * After FW download complete and after memory dump collections.
++ * After FW download complete.
+ */
+ wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
+ TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
+@@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev)
+ }
+ }
+
+- /* After memory dump collection, Controller is powered off.*/
+- if (test_bit(QCA_BT_OFF, &qca->flags))
+- return 0;
+-
+ cancel_work_sync(&qca->ws_awake_device);
+ cancel_work_sync(&qca->ws_awake_rx);
+
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index ef96ad06fa54e..9e03402ef1b37 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
+ hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
+ kfree_skb(skb);
+ }
+- } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+
+- clear_bit(HCI_UART_SENDING, &hu->tx_state);
++ clear_bit(HCI_UART_SENDING, &hu->tx_state);
++ } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+ }
+
+ /* ------- Interface to HCI layer ------ */
+diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
+index f0697f433c2f1..08c45457c90fe 100644
+--- a/drivers/bus/mhi/core/init.c
++++ b/drivers/bus/mhi/core/init.c
+@@ -552,6 +552,9 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+ tre_ring = &mhi_chan->tre_ring;
+ chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
++ if (!chan_ctxt->rbase) /* Already uninitialized */
++ return;
++
+ mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+ tre_ring->pre_aligned, tre_ring->dma_handle);
+ vfree(buf_ring->base);
+diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
+index 954a8411d67d2..0eb80f786f4dd 100644
+--- a/drivers/char/hw_random/ingenic-trng.c
++++ b/drivers/char/hw_random/ingenic-trng.c
+@@ -113,13 +113,17 @@ static int ingenic_trng_probe(struct platform_device *pdev)
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hwrng\n");
+- return ret;
++ goto err_unprepare_clk;
+ }
+
+ platform_set_drvdata(pdev, trng);
+
+ dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
+ return 0;
++
++err_unprepare_clk:
++ clk_disable_unprepare(trng->clk);
++ return ret;
+ }
+
+ static int ingenic_trng_remove(struct platform_device *pdev)
+diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
+index e262445fed5f5..f35f0f31f52ad 100644
+--- a/drivers/char/hw_random/timeriomem-rng.c
++++ b/drivers/char/hw_random/timeriomem-rng.c
+@@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
+ */
+ if (retval > 0)
+ usleep_range(period_us,
+- period_us + min(1, period_us / 100));
++ period_us + max(1, period_us / 100));
+
+ *(u32 *)data = readl(priv->io_base);
+ retval += sizeof(u32);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 5f3b8ac9d97b0..a894c0559a8cf 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1972,7 +1972,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ return -EPERM;
+ if (crng_init < 2)
+ return -ENODATA;
+- crng_reseed(&primary_crng, NULL);
++ crng_reseed(&primary_crng, &input_pool);
+ crng_global_init_time = jiffies - 1;
+ return 0;
+ default:
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 947d1db0a5ccf..283f78211c3a7 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -164,8 +164,6 @@ extern const struct file_operations tpmrm_fops;
+ extern struct idr dev_nums_idr;
+
+ ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz);
+-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+- size_t min_rsp_body_length, const char *desc);
+ int tpm_get_timeouts(struct tpm_chip *);
+ int tpm_auto_startup(struct tpm_chip *chip);
+
+@@ -194,8 +192,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
+ int tpm_chip_start(struct tpm_chip *chip);
+ void tpm_chip_stop(struct tpm_chip *chip);
+ struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
+-__must_check int tpm_try_get_ops(struct tpm_chip *chip);
+-void tpm_put_ops(struct tpm_chip *chip);
+
+ struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops);
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 92c51c6cfd1b7..431919d5f48af 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ if (rc < 0)
+ return false;
+
+- if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
++ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
++ | TPM_ACCESS_REQUEST_USE)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ priv->locality = l;
+ return true;
+@@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ return false;
+ }
+
+-static bool locality_inactive(struct tpm_chip *chip, int l)
+-{
+- struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+- int rc;
+- u8 access;
+-
+- rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+- if (rc < 0)
+- return false;
+-
+- if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
+- == TPM_ACCESS_VALID)
+- return true;
+-
+- return false;
+-}
+-
+ static int release_locality(struct tpm_chip *chip, int l)
+ {
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+- unsigned long stop, timeout;
+- long rc;
+
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+- stop = jiffies + chip->timeout_a;
+-
+- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+-again:
+- timeout = stop - jiffies;
+- if ((long)timeout <= 0)
+- return -1;
+-
+- rc = wait_event_interruptible_timeout(priv->int_queue,
+- (locality_inactive(chip, l)),
+- timeout);
+-
+- if (rc > 0)
+- return 0;
+-
+- if (rc == -ERESTARTSYS && freezing(current)) {
+- clear_thread_flag(TIF_SIGPENDING);
+- goto again;
+- }
+- } else {
+- do {
+- if (locality_inactive(chip, l))
+- return 0;
+- tpm_msleep(TPM_TIMEOUT);
+- } while (time_before(jiffies, stop));
+- }
+- return -1;
++ return 0;
+ }
+
+ static int request_locality(struct tpm_chip *chip, int l)
+diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
+index 177368cac6dd6..a55b37fc2c8bd 100644
+--- a/drivers/clk/clk-ast2600.c
++++ b/drivers/clk/clk-ast2600.c
+@@ -17,7 +17,8 @@
+
+ #define ASPEED_G6_NUM_CLKS 71
+
+-#define ASPEED_G6_SILICON_REV 0x004
++#define ASPEED_G6_SILICON_REV 0x014
++#define CHIP_REVISION_ID GENMASK(23, 16)
+
+ #define ASPEED_G6_RESET_CTRL 0x040
+ #define ASPEED_G6_RESET_CTRL2 0x050
+@@ -190,18 +191,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
+ static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
+ {
+ unsigned int mult, div;
++ u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
+
+- if (val & BIT(20)) {
+- /* Pass through mode */
+- mult = div = 1;
++ if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
++ if (val & BIT(24)) {
++ /* Pass through mode */
++ mult = div = 1;
++ } else {
++ /* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */
++ u32 m = val & 0x1fff;
++ u32 n = (val >> 13) & 0x3f;
++ u32 p = (val >> 19) & 0xf;
++
++ mult = (m + 1);
++ div = (n + 1) * (p + 1);
++ }
+ } else {
+- /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
+- u32 m = (val >> 5) & 0x3f;
+- u32 od = (val >> 4) & 0x1;
+- u32 n = val & 0xf;
++ if (val & BIT(20)) {
++ /* Pass through mode */
++ mult = div = 1;
++ } else {
++ /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
++ u32 m = (val >> 5) & 0x3f;
++ u32 od = (val >> 4) & 0x1;
++ u32 n = val & 0xf;
+
+- mult = (2 - od) * (m + 2);
+- div = n + 1;
++ mult = (2 - od) * (m + 2);
++ div = n + 1;
++ }
+ }
+ return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
+ mult, div);
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index c499799693ccc..344997203f0e7 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -494,8 +494,13 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
+ else
+ init.ops = &clk_divider_ops;
+ init.flags = flags;
+- init.parent_names = (parent_name ? &parent_name: NULL);
+- init.num_parents = (parent_name ? 1 : 0);
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.parent_hws = parent_hw ? &parent_hw : NULL;
++ init.parent_data = parent_data;
++ if (parent_name || parent_hw || parent_data)
++ init.num_parents = 1;
++ else
++ init.num_parents = 0;
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
+index b17a13e9337c4..49f27fe532139 100644
+--- a/drivers/clk/meson/clk-pll.c
++++ b/drivers/clk/meson/clk-pll.c
+@@ -365,13 +365,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+- unsigned int enabled, m, n, frac = 0, ret;
++ unsigned int enabled, m, n, frac = 0;
+ unsigned long old_rate;
++ int ret;
+
+ if (parent_rate == 0 || rate == 0)
+ return -EINVAL;
+
+- old_rate = rate;
++ old_rate = clk_hw_get_rate(hw);
+
+ ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
+ if (ret)
+@@ -393,7 +394,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ if (!enabled)
+ return 0;
+
+- if (meson_clk_pll_enable(hw)) {
++ ret = meson_clk_pll_enable(hw);
++ if (ret) {
+ pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
+ __func__, old_rate);
+ /*
+@@ -405,7 +407,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ meson_clk_pll_set_rate(hw, old_rate, parent_rate);
+ }
+
+- return 0;
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
+index 9d7016bcd6800..b8dcfe62312bb 100644
+--- a/drivers/clk/qcom/gcc-msm8998.c
++++ b/drivers/clk/qcom/gcc-msm8998.c
+@@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
+
+ static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_test = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_test",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_even = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_even",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_main = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_main",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_odd = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_odd",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_test = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_test",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll2 = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
+ .name = "gpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_even = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_even",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_main",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_odd = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_odd",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_test = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_test",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
+ .name = "gpll3",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_even",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_odd = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_odd",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_test = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_test",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll4 = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_odd = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_odd",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_test = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_test",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
+index b05901b249172..88e896abb6631 100644
+--- a/drivers/clk/qcom/gcc-sc7180.c
++++ b/drivers/clk/qcom/gcc-sc7180.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ */
+
+ #include <linux/clk-provider.h>
+@@ -919,19 +919,6 @@ static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
+ },
+ };
+
+-static struct clk_branch gcc_camera_xo_clk = {
+- .halt_reg = 0xb02c,
+- .halt_check = BRANCH_HALT,
+- .clkr = {
+- .enable_reg = 0xb02c,
+- .enable_mask = BIT(0),
+- .hw.init = &(struct clk_init_data){
+- .name = "gcc_camera_xo_clk",
+- .ops = &clk_branch2_ops,
+- },
+- },
+-};
+-
+ static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+@@ -1096,19 +1083,6 @@ static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
+ },
+ };
+
+-static struct clk_branch gcc_disp_xo_clk = {
+- .halt_reg = 0xb030,
+- .halt_check = BRANCH_HALT,
+- .clkr = {
+- .enable_reg = 0xb030,
+- .enable_mask = BIT(0),
+- .hw.init = &(struct clk_init_data){
+- .name = "gcc_disp_xo_clk",
+- .ops = &clk_branch2_ops,
+- },
+- },
+-};
+-
+ static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+@@ -2159,19 +2133,6 @@ static struct clk_branch gcc_video_throttle_axi_clk = {
+ },
+ };
+
+-static struct clk_branch gcc_video_xo_clk = {
+- .halt_reg = 0xb028,
+- .halt_check = BRANCH_HALT,
+- .clkr = {
+- .enable_reg = 0xb028,
+- .enable_mask = BIT(0),
+- .hw.init = &(struct clk_init_data){
+- .name = "gcc_video_xo_clk",
+- .ops = &clk_branch2_ops,
+- },
+- },
+-};
+-
+ static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+@@ -2304,7 +2265,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
+- [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+@@ -2317,7 +2277,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
+- [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+@@ -2413,7 +2372,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_GPLL0_DIV_CLK_SRC] = &gcc_video_gpll0_div_clk_src.clkr,
+ [GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+- [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL6] = &gpll6.clkr,
+@@ -2510,6 +2468,9 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
+ regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
++ regmap_update_bits(regmap, 0x0b02c, BIT(0), BIT(0));
++ regmap_update_bits(regmap, 0x0b028, BIT(0), BIT(0));
++ regmap_update_bits(regmap, 0x0b030, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
+index d366c7c2abc77..f5e31e692b9b4 100644
+--- a/drivers/clk/qcom/lpass-gfm-sm8250.c
++++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
+@@ -33,14 +33,13 @@ struct clk_gfm {
+ void __iomem *gfm_mux;
+ };
+
+-#define GFM_MASK BIT(1)
+ #define to_clk_gfm(_hw) container_of(_hw, struct clk_gfm, hw)
+
+ static u8 clk_gfm_get_parent(struct clk_hw *hw)
+ {
+ struct clk_gfm *clk = to_clk_gfm(hw);
+
+- return readl(clk->gfm_mux) & GFM_MASK;
++ return readl(clk->gfm_mux) & clk->mux_mask;
+ }
+
+ static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
+@@ -51,9 +50,10 @@ static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
+ val = readl(clk->gfm_mux);
+
+ if (index)
+- val |= GFM_MASK;
++ val |= clk->mux_mask;
+ else
+- val &= ~GFM_MASK;
++ val &= ~clk->mux_mask;
++
+
+ writel(val, clk->gfm_mux);
+
+diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+index aa5389b04d742..7b2c640c3de0c 100644
+--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+@@ -69,7 +69,6 @@ enum clk_ids {
+ CLK_PLL5_DIV2,
+ CLK_PLL5_DIV4,
+ CLK_S1,
+- CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_RPCSRC,
+@@ -137,7 +136,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
+ DEF_FIXED("icu", R8A779A0_CLK_ICU, CLK_PLL5_DIV4, 2, 1),
+ DEF_FIXED("icud2", R8A779A0_CLK_ICUD2, CLK_PLL5_DIV4, 4, 1),
+ DEF_FIXED("vcbus", R8A779A0_CLK_VCBUS, CLK_PLL5_DIV4, 1, 1),
+- DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_MAIN, 2, 1),
++ DEF_FIXED("cbfusa", R8A779A0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+ DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
+diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
+index 14c7c47124787..66be9ea69e332 100644
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -79,6 +79,7 @@ config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
++ select TIMER_OF if OF
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
+diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
+index bc96a4cbf26c6..e52e12d27d2aa 100644
+--- a/drivers/clocksource/mxs_timer.c
++++ b/drivers/clocksource/mxs_timer.c
+@@ -131,10 +131,7 @@ static void mxs_irq_clear(char *state)
+
+ /* Clear pending interrupt */
+ timrot_irq_acknowledge();
+-
+-#ifdef DEBUG
+- pr_info("%s: changing mode to %s\n", __func__, state)
+-#endif /* DEBUG */
++ pr_debug("%s: changing mode to %s\n", __func__, state);
+ }
+
+ static int mxs_shutdown(struct clock_event_device *evt)
+diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
+index d3e5a6fceb61b..d1bbc16fba4b4 100644
+--- a/drivers/cpufreq/acpi-cpufreq.c
++++ b/drivers/cpufreq/acpi-cpufreq.c
+@@ -54,7 +54,6 @@ struct acpi_cpufreq_data {
+ unsigned int resume;
+ unsigned int cpu_feature;
+ unsigned int acpi_perf_cpu;
+- unsigned int first_perf_state;
+ cpumask_var_t freqdomain_cpus;
+ void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
+@@ -223,10 +222,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
+
+ perf = to_perf_data(data);
+
+- cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
++ cpufreq_for_each_entry(pos, policy->freq_table)
+ if (msr == perf->states[pos->driver_data].status)
+ return pos->frequency;
+- return policy->freq_table[data->first_perf_state].frequency;
++ return policy->freq_table[0].frequency;
+ }
+
+ static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
+@@ -365,7 +364,6 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+ struct cpufreq_policy *policy;
+ unsigned int freq;
+ unsigned int cached_freq;
+- unsigned int state;
+
+ pr_debug("%s (%d)\n", __func__, cpu);
+
+@@ -377,11 +375,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+ if (unlikely(!data || !policy->freq_table))
+ return 0;
+
+- state = to_perf_data(data)->state;
+- if (state < data->first_perf_state)
+- state = data->first_perf_state;
+-
+- cached_freq = policy->freq_table[state].frequency;
++ cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
+ if (freq != cached_freq) {
+ /*
+@@ -680,7 +674,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int valid_states = 0;
+ unsigned int result = 0;
+- unsigned int state_count;
+ u64 max_boost_ratio;
+ unsigned int i;
+ #ifdef CONFIG_SMP
+@@ -795,28 +788,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ goto err_unreg;
+ }
+
+- state_count = perf->state_count + 1;
+-
+- max_boost_ratio = get_max_boost_ratio(cpu);
+- if (max_boost_ratio) {
+- /*
+- * Make a room for one more entry to represent the highest
+- * available "boost" frequency.
+- */
+- state_count++;
+- valid_states++;
+- data->first_perf_state = valid_states;
+- } else {
+- /*
+- * If the maximum "boost" frequency is unknown, ask the arch
+- * scale-invariance code to use the "nominal" performance for
+- * CPU utilization scaling so as to prevent the schedutil
+- * governor from selecting inadequate CPU frequencies.
+- */
+- arch_set_max_freq_ratio(true);
+- }
+-
+- freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
++ freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
++ GFP_KERNEL);
+ if (!freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+@@ -851,27 +824,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ }
+ freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
++ max_boost_ratio = get_max_boost_ratio(cpu);
+ if (max_boost_ratio) {
+- unsigned int state = data->first_perf_state;
+- unsigned int freq = freq_table[state].frequency;
++ unsigned int freq = freq_table[0].frequency;
+
+ /*
+ * Because the loop above sorts the freq_table entries in the
+ * descending order, freq is the maximum frequency in the table.
+ * Assume that it corresponds to the CPPC nominal frequency and
+- * use it to populate the frequency field of the extra "boost"
+- * frequency entry.
++ * use it to set cpuinfo.max_freq.
+ */
+- freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
++ policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
++ } else {
+ /*
+- * The purpose of the extra "boost" frequency entry is to make
+- * the rest of cpufreq aware of the real maximum frequency, but
+- * the way to request it is the same as for the first_perf_state
+- * entry that is expected to cover the entire range of "boost"
+- * frequencies of the CPU, so copy the driver_data value from
+- * that entry.
++ * If the maximum "boost" frequency is unknown, ask the arch
++ * scale-invariance code to use the "nominal" performance for
++ * CPU utilization scaling so as to prevent the schedutil
++ * governor from selecting inadequate CPU frequencies.
+ */
+- freq_table[0].driver_data = freq_table[state].driver_data;
++ arch_set_max_freq_ratio(true);
+ }
+
+ policy->freq_table = freq_table;
+@@ -947,8 +918,7 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
+ {
+ struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
+ policy->cpu);
+- struct acpi_cpufreq_data *data = policy->driver_data;
+- unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
++ unsigned int freq = policy->freq_table[0].frequency;
+
+ if (perf->states[0].core_frequency * 1000 != freq)
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 3e31e5d28b79c..4153150e20db5 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -597,6 +597,16 @@ unmap_base:
+ return ret;
+ }
+
++static void brcm_avs_prepare_uninit(struct platform_device *pdev)
++{
++ struct private_data *priv;
++
++ priv = platform_get_drvdata(pdev);
++
++ iounmap(priv->avs_intr_base);
++ iounmap(priv->base);
++}
++
+ static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ struct cpufreq_frequency_table *freq_table;
+@@ -732,21 +742,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+
+ brcm_avs_driver.driver_data = pdev;
+
+- return cpufreq_register_driver(&brcm_avs_driver);
++ ret = cpufreq_register_driver(&brcm_avs_driver);
++ if (ret)
++ brcm_avs_prepare_uninit(pdev);
++
++ return ret;
+ }
+
+ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+ {
+- struct private_data *priv;
+ int ret;
+
+ ret = cpufreq_unregister_driver(&brcm_avs_driver);
+- if (ret)
+- return ret;
++ WARN_ON(ret);
+
+- priv = platform_get_drvdata(pdev);
+- iounmap(priv->base);
+- iounmap(priv->avs_intr_base);
++ brcm_avs_prepare_uninit(pdev);
+
+ return 0;
+ }
+diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
+index f839dc9852c08..d3f756f7b5a05 100644
+--- a/drivers/cpufreq/freq_table.c
++++ b/drivers/cpufreq/freq_table.c
+@@ -52,7 +52,13 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+ }
+
+ policy->min = policy->cpuinfo.min_freq = min_freq;
+- policy->max = policy->cpuinfo.max_freq = max_freq;
++ policy->max = max_freq;
++ /*
++ * If the driver has set its own cpuinfo.max_freq above max_freq, leave
++ * it as is.
++ */
++ if (policy->cpuinfo.max_freq < max_freq)
++ policy->max = policy->cpuinfo.max_freq = max_freq;
+
+ if (policy->min == ~0)
+ return -EINVAL;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index be05e038d956c..c4d8a5126d611 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -819,13 +819,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
+ NULL,
+ };
+
+-static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
++static void intel_pstate_get_hwp_max(struct cpudata *cpu, int *phy_max,
+ int *current_max)
+ {
+ u64 cap;
+
+- rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+- WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
++ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
++ WRITE_ONCE(cpu->hwp_cap_cached, cap);
+ if (global.no_turbo || global.turbo_disabled)
+ *current_max = HWP_GUARANTEED_PERF(cap);
+ else
+@@ -1213,7 +1213,7 @@ static void update_qos_request(enum freq_qos_req_type type)
+ continue;
+
+ if (hwp_active)
+- intel_pstate_get_hwp_max(i, &turbo_max, &max_state);
++ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ else
+ turbo_max = cpu->pstate.turbo_pstate;
+
+@@ -1714,21 +1714,22 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+- cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ cpu->pstate.scaling = pstate_funcs.get_scaling();
+- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+
+ if (hwp_active && !hwp_mode_bdw) {
+ unsigned int phy_max, current_max;
+
+- intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
++ intel_pstate_get_hwp_max(cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ cpu->pstate.turbo_pstate = phy_max;
++ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ }
++ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+@@ -2207,7 +2208,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+ * rather than pure ratios.
+ */
+ if (hwp_active) {
+- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
++ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ } else {
+ max_state = global.no_turbo || global.turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+@@ -2322,7 +2323,7 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
+ if (hwp_active) {
+ int max_state, turbo_max;
+
+- intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
++ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ max_freq = max_state * cpu->pstate.scaling;
+ } else {
+ max_freq = intel_pstate_get_max_freq(cpu);
+@@ -2709,7 +2710,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ if (hwp_active) {
+ u64 value;
+
+- intel_pstate_get_hwp_max(policy->cpu, &turbo_max, &max_state);
++ intel_pstate_get_hwp_max(cpu, &turbo_max, &max_state);
+ policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
+ rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index 9ed5341dc515b..2726e77c9e5a9 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -32,6 +32,7 @@ struct qcom_cpufreq_soc_data {
+
+ struct qcom_cpufreq_data {
+ void __iomem *base;
++ struct resource *res;
+ const struct qcom_cpufreq_soc_data *soc_data;
+ };
+
+@@ -280,6 +281,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+ struct of_phandle_args args;
+ struct device_node *cpu_np;
+ struct device *cpu_dev;
++ struct resource *res;
+ void __iomem *base;
+ struct qcom_cpufreq_data *data;
+ int ret, index;
+@@ -303,18 +305,33 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+
+ index = args.args[0];
+
+- base = devm_platform_ioremap_resource(pdev, index);
+- if (IS_ERR(base))
+- return PTR_ERR(base);
++ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
++ if (!res) {
++ dev_err(dev, "failed to get mem resource %d\n", index);
++ return -ENODEV;
++ }
++
++ if (!request_mem_region(res->start, resource_size(res), res->name)) {
++ dev_err(dev, "failed to request resource %pR\n", res);
++ return -EBUSY;
++ }
+
+- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
++ base = ioremap(res->start, resource_size(res));
++ if (IS_ERR(base)) {
++ dev_err(dev, "failed to map resource %pR\n", res);
++ ret = PTR_ERR(base);
++ goto release_region;
++ }
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+- goto error;
++ goto unmap_base;
+ }
+
+ data->soc_data = of_device_get_match_data(&pdev->dev);
+ data->base = base;
++ data->res = res;
+
+ /* HW should be in enabled state to proceed */
+ if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
+@@ -349,7 +366,11 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+
+ return 0;
+ error:
+- devm_iounmap(dev, base);
++ kfree(data);
++unmap_base:
++ iounmap(data->base);
++release_region:
++ release_mem_region(res->start, resource_size(res));
+ return ret;
+ }
+
+@@ -357,12 +378,15 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+ {
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+ struct qcom_cpufreq_data *data = policy->driver_data;
+- struct platform_device *pdev = cpufreq_get_driver_data();
++ struct resource *res = data->res;
++ void __iomem *base = data->base;
+
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ kfree(policy->freq_table);
+- devm_iounmap(&pdev->dev, data->base);
++ kfree(data);
++ iounmap(base);
++ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+ }
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+index b72de8939497b..ffa628c89e21f 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+@@ -20,6 +20,7 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ u32 mode = ctx->mode;
++ void *backup_iv = NULL;
+ /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+ u32 rx_cnt = SS_RX_DEFAULT;
+ u32 tx_cnt = 0;
+@@ -30,6 +31,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ unsigned int ileft = areq->cryptlen;
+ unsigned int oleft = areq->cryptlen;
+ unsigned int todo;
++ unsigned long pi = 0, po = 0; /* progress for in and out */
++ bool miter_err;
+ struct sg_mapping_iter mi, mo;
+ unsigned int oi, oo; /* offset for in and out */
+ unsigned long flags;
+@@ -42,52 +45,71 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ return -EINVAL;
+ }
+
++ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
++ backup_iv = kzalloc(ivsize, GFP_KERNEL);
++ if (!backup_iv)
++ return -ENOMEM;
++ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
++ }
++
+ spin_lock_irqsave(&ss->slock, flags);
+
+- for (i = 0; i < op->keylen; i += 4)
+- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
++ for (i = 0; i < op->keylen / 4; i++)
++ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
+
+ if (areq->iv) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->iv + i * 4);
+- writel(v, ss->base + SS_IV0 + i * 4);
++ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+- SG_MITER_TO_SG | SG_MITER_ATOMIC);
+- sg_miter_next(&mi);
+- sg_miter_next(&mo);
+- if (!mi.addr || !mo.addr) {
+- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+- err = -EINVAL;
+- goto release_ss;
+- }
+
+ ileft = areq->cryptlen / 4;
+ oleft = areq->cryptlen / 4;
+ oi = 0;
+ oo = 0;
+ do {
+- todo = min(rx_cnt, ileft);
+- todo = min_t(size_t, todo, (mi.length - oi) / 4);
+- if (todo) {
+- ileft -= todo;
+- writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+- oi += todo * 4;
+- }
+- if (oi == mi.length) {
+- sg_miter_next(&mi);
+- oi = 0;
++ if (ileft) {
++ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
++ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
++ if (pi)
++ sg_miter_skip(&mi, pi);
++ miter_err = sg_miter_next(&mi);
++ if (!miter_err || !mi.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
++ todo = min(rx_cnt, ileft);
++ todo = min_t(size_t, todo, (mi.length - oi) / 4);
++ if (todo) {
++ ileft -= todo;
++ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
++ oi += todo * 4;
++ }
++ if (oi == mi.length) {
++ pi += mi.length;
++ oi = 0;
++ }
++ sg_miter_stop(&mi);
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+
++ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
++ SG_MITER_TO_SG | SG_MITER_ATOMIC);
++ if (po)
++ sg_miter_skip(&mo, po);
++ miter_err = sg_miter_next(&mo);
++ if (!miter_err || !mo.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
+ todo = min(tx_cnt, oleft);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
+ if (todo) {
+@@ -96,21 +118,23 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ oo += todo * 4;
+ }
+ if (oo == mo.length) {
+- sg_miter_next(&mo);
+ oo = 0;
++ po += mo.length;
+ }
++ sg_miter_stop(&mo);
+ } while (oleft);
+
+ if (areq->iv) {
+- for (i = 0; i < 4 && i < ivsize / 4; i++) {
+- v = readl(ss->base + SS_IV0 + i * 4);
+- *(u32 *)(areq->iv + i * 4) = v;
++ if (mode & SS_DECRYPTION) {
++ memcpy(areq->iv, backup_iv, ivsize);
++ kfree_sensitive(backup_iv);
++ } else {
++ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
++ ivsize, 0);
+ }
+ }
+
+ release_ss:
+- sg_miter_stop(&mi);
+- sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_irqrestore(&ss->slock, flags);
+ return err;
+@@ -161,13 +185,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ unsigned int ileft = areq->cryptlen;
+ unsigned int oleft = areq->cryptlen;
+ unsigned int todo;
++ void *backup_iv = NULL;
+ struct sg_mapping_iter mi, mo;
++ unsigned long pi = 0, po = 0; /* progress for in and out */
++ bool miter_err;
+ unsigned int oi, oo; /* offset for in and out */
+ unsigned int ob = 0; /* offset in buf */
+ unsigned int obo = 0; /* offset in bufo*/
+ unsigned int obl = 0; /* length of data in bufo */
+ unsigned long flags;
+- bool need_fallback;
++ bool need_fallback = false;
+
+ if (!areq->cryptlen)
+ return 0;
+@@ -186,12 +213,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ * we can use the SS optimized function
+ */
+ while (in_sg && no_chunk == 1) {
+- if (in_sg->length % 4)
++ if ((in_sg->length | in_sg->offset) & 3u)
+ no_chunk = 0;
+ in_sg = sg_next(in_sg);
+ }
+ while (out_sg && no_chunk == 1) {
+- if (out_sg->length % 4)
++ if ((out_sg->length | out_sg->offset) & 3u)
+ no_chunk = 0;
+ out_sg = sg_next(out_sg);
+ }
+@@ -202,30 +229,26 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ if (need_fallback)
+ return sun4i_ss_cipher_poll_fallback(areq);
+
++ if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
++ backup_iv = kzalloc(ivsize, GFP_KERNEL);
++ if (!backup_iv)
++ return -ENOMEM;
++ scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
++ }
++
+ spin_lock_irqsave(&ss->slock, flags);
+
+- for (i = 0; i < op->keylen; i += 4)
+- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
++ for (i = 0; i < op->keylen / 4; i++)
++ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
+
+ if (areq->iv) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->iv + i * 4);
+- writel(v, ss->base + SS_IV0 + i * 4);
++ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+- SG_MITER_TO_SG | SG_MITER_ATOMIC);
+- sg_miter_next(&mi);
+- sg_miter_next(&mo);
+- if (!mi.addr || !mo.addr) {
+- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+- err = -EINVAL;
+- goto release_ss;
+- }
+ ileft = areq->cryptlen;
+ oleft = areq->cryptlen;
+ oi = 0;
+@@ -233,8 +256,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+
+ while (oleft) {
+ if (ileft) {
+- char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+-
++ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
++ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
++ if (pi)
++ sg_miter_skip(&mi, pi);
++ miter_err = sg_miter_next(&mi);
++ if (!miter_err || !mi.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
+ /*
+ * todo is the number of consecutive 4byte word that we
+ * can read from current SG
+@@ -256,52 +287,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ */
+ todo = min(rx_cnt * 4 - ob, ileft);
+ todo = min_t(size_t, todo, mi.length - oi);
+- memcpy(buf + ob, mi.addr + oi, todo);
++ memcpy(ss->buf + ob, mi.addr + oi, todo);
+ ileft -= todo;
+ oi += todo;
+ ob += todo;
+ if (!(ob % 4)) {
+- writesl(ss->base + SS_RXFIFO, buf,
++ writesl(ss->base + SS_RXFIFO, ss->buf,
+ ob / 4);
+ ob = 0;
+ }
+ }
+ if (oi == mi.length) {
+- sg_miter_next(&mi);
++ pi += mi.length;
+ oi = 0;
+ }
++ sg_miter_stop(&mi);
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+- dev_dbg(ss->dev,
+- "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
+- mode,
+- oi, mi.length, ileft, areq->cryptlen, rx_cnt,
+- oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
+
+ if (!tx_cnt)
+ continue;
++ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
++ SG_MITER_TO_SG | SG_MITER_ATOMIC);
++ if (po)
++ sg_miter_skip(&mo, po);
++ miter_err = sg_miter_next(&mo);
++ if (!miter_err || !mo.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
+ /* todo in 4bytes word */
+ todo = min(tx_cnt, oleft / 4);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
++
+ if (todo) {
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oleft -= todo * 4;
+ oo += todo * 4;
+ if (oo == mo.length) {
+- sg_miter_next(&mo);
++ po += mo.length;
+ oo = 0;
+ }
+ } else {
+- char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+-
+ /*
+ * read obl bytes in bufo, we read at maximum for
+ * emptying the device
+ */
+- readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
++ readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
+ obl = tx_cnt * 4;
+ obo = 0;
+ do {
+@@ -313,28 +349,31 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ */
+ todo = min_t(size_t,
+ mo.length - oo, obl - obo);
+- memcpy(mo.addr + oo, bufo + obo, todo);
++ memcpy(mo.addr + oo, ss->bufo + obo, todo);
+ oleft -= todo;
+ obo += todo;
+ oo += todo;
+ if (oo == mo.length) {
++ po += mo.length;
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } while (obo < obl);
+ /* bufo must be fully used here */
+ }
++ sg_miter_stop(&mo);
+ }
+ if (areq->iv) {
+- for (i = 0; i < 4 && i < ivsize / 4; i++) {
+- v = readl(ss->base + SS_IV0 + i * 4);
+- *(u32 *)(areq->iv + i * 4) = v;
++ if (mode & SS_DECRYPTION) {
++ memcpy(areq->iv, backup_iv, ivsize);
++ kfree_sensitive(backup_iv);
++ } else {
++ scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
++ ivsize, 0);
+ }
+ }
+
+ release_ss:
+- sg_miter_stop(&mi);
+- sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_irqrestore(&ss->slock, flags);
+
+diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+index 5c291e4a6857b..c242fccb2ab67 100644
+--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
++++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+@@ -148,6 +148,8 @@ struct sun4i_ss_ctx {
+ struct reset_control *reset;
+ struct device *dev;
+ struct resource *res;
++ char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
++ char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+ spinlock_t slock; /* control the use of the device */
+ #ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+ u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index 30390a7324b29..0e5537838ef36 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -42,7 +42,7 @@
+
+ /* ================= Device Structure ================== */
+
+-struct device_private iproc_priv;
++struct bcm_device_private iproc_priv;
+
+ /* ==================== Parameters ===================== */
+
+diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
+index 0ad5892b445d3..71281a3bdbdc0 100644
+--- a/drivers/crypto/bcm/cipher.h
++++ b/drivers/crypto/bcm/cipher.h
+@@ -420,7 +420,7 @@ struct spu_hw {
+ u32 num_chan;
+ };
+
+-struct device_private {
++struct bcm_device_private {
+ struct platform_device *pdev;
+
+ struct spu_hw spu;
+@@ -467,6 +467,6 @@ struct device_private {
+ struct mbox_chan **mbox;
+ };
+
+-extern struct device_private iproc_priv;
++extern struct bcm_device_private iproc_priv;
+
+ #endif
+diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
+index 2b304fc780595..77aeedb840555 100644
+--- a/drivers/crypto/bcm/util.c
++++ b/drivers/crypto/bcm/util.c
+@@ -348,7 +348,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
+ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+ {
+- struct device_private *ipriv;
++ struct bcm_device_private *ipriv;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+ int i;
+diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
+index 846a3d90b41a3..77783feb62b25 100644
+--- a/drivers/crypto/qat/Kconfig
++++ b/drivers/crypto/qat/Kconfig
+@@ -11,7 +11,7 @@ config CRYPTO_DEV_QAT
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+- select CRYPTO_AES
++ select CRYPTO_LIB_AES
+ select FW_LOADER
+
+ config CRYPTO_DEV_QAT_DH895xCC
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 4fd85f31630ac..25c9f825b8b54 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1093,11 +1093,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+ */
+ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ unsigned int offset, int datalen, int elen,
+- struct talitos_ptr *link_tbl_ptr)
++ struct talitos_ptr *link_tbl_ptr, int align)
+ {
+ int n_sg = elen ? sg_count + 1 : sg_count;
+ int count = 0;
+ int cryptlen = datalen + elen;
++ int padding = ALIGN(cryptlen, align) - cryptlen;
+
+ while (cryptlen && sg && n_sg--) {
+ unsigned int len = sg_dma_len(sg);
+@@ -1121,7 +1122,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ offset += datalen;
+ }
+ to_talitos_ptr(link_tbl_ptr + count,
+- sg_dma_address(sg) + offset, len, 0);
++ sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+ count++;
+ cryptlen -= len;
+@@ -1144,10 +1145,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen,
+- bool force)
++ bool force, int align)
+ {
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
++ int aligned_len = ALIGN(len, align);
+
+ if (!src) {
+ to_talitos_ptr(ptr, 0, 0, is_sec1);
+@@ -1155,22 +1157,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ }
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
+ if (sg_count == 1 && !force) {
+- to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
++ to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
+ return sg_count;
+ }
+ if (is_sec1) {
+- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
++ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
+ return sg_count;
+ }
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
+- &edesc->link_tbl[tbl_off]);
++ &edesc->link_tbl[tbl_off], align);
+ if (sg_count == 1 && !force) {
+ /* Only one segment now, so no link tbl needed*/
+ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+ return sg_count;
+ }
+ to_talitos_ptr(ptr, edesc->dma_link_tbl +
+- tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
++ tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
+ to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
+
+ return sg_count;
+@@ -1182,7 +1184,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int offset, int tbl_off)
+ {
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+- tbl_off, 0, false);
++ tbl_off, 0, false, 1);
+ }
+
+ /*
+@@ -1251,7 +1253,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen,
+- false);
++ false, 1);
+
+ if (ret > 1) {
+ tbl_off += ret;
+@@ -1271,7 +1273,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ elen = 0;
+ ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off, elen,
+- is_ipsec_esp && !encrypt);
++ is_ipsec_esp && !encrypt, 1);
+ tbl_off += ret;
+
+ if (!encrypt && is_ipsec_esp) {
+@@ -1577,6 +1579,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
+ bool sync_needed = false;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
++ bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
++ (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
+
+ /* first DWORD empty */
+
+@@ -1597,8 +1601,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
+ /*
+ * cipher in
+ */
+- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+- &desc->ptr[3], sg_count, 0, 0);
++ sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
++ sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
+ if (sg_count > 1)
+ sync_needed = true;
+
+@@ -2761,6 +2765,22 @@ static struct talitos_alg_template driver_algs[] = {
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CTR,
+ },
++ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
++ .alg.skcipher = {
++ .base.cra_name = "ctr(aes)",
++ .base.cra_driver_name = "ctr-aes-talitos",
++ .base.cra_blocksize = 1,
++ .base.cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_ALLOCATES_MEMORY,
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ .setkey = skcipher_aes_setkey,
++ },
++ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
++ DESC_HDR_SEL0_AESU |
++ DESC_HDR_MODE0_AESU_CTR,
++ },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des)",
+@@ -3178,6 +3198,12 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
+ t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
+ t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
++ if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
++ DESC_TYPE(t_alg->algt.desc_hdr_template) !=
++ DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
++ devm_kfree(dev, t_alg);
++ return ERR_PTR(-ENOTSUPP);
++ }
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg = &t_alg->algt.alg.aead.base;
+diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
+index 1469b956948ab..32825119e8805 100644
+--- a/drivers/crypto/talitos.h
++++ b/drivers/crypto/talitos.h
+@@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
+
+ /* primary execution unit mode (MODE0) and derivatives */
+ #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
++#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
+ #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
+ #define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
+ #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 737b207c9e30d..3003558c1a8bb 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -1038,7 +1038,7 @@ static ssize_t range_parse(const char *opt, size_t len, struct range *range)
+ {
+ unsigned long long addr = 0;
+ char *start, *end, *str;
+- ssize_t rc = EINVAL;
++ ssize_t rc = -EINVAL;
+
+ str = kstrdup(opt, GFP_KERNEL);
+ if (!str)
+diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
+index 0feb323bae1e3..f8459cc5315df 100644
+--- a/drivers/dma/fsldma.c
++++ b/drivers/dma/fsldma.c
+@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
+ {
+ struct fsldma_device *fdev;
+ struct device_node *child;
++ unsigned int i;
+ int err;
+
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
+ return 0;
+
+ out_free_fdev:
++ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
++ if (fdev->chan[i])
++ fsl_dma_chan_remove(fdev->chan[i]);
++ }
+ irq_dispose_mapping(fdev->irq);
+ iounmap(fdev->regs);
+ out_free:
+@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
+ if (fdev->chan[i])
+ fsl_dma_chan_remove(fdev->chan[i]);
+ }
++ irq_dispose_mapping(fdev->irq);
+
+ iounmap(fdev->regs);
+ kfree(fdev);
+diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
+index 07cc7320a614f..9045a6f7f5893 100644
+--- a/drivers/dma/hsu/pci.c
++++ b/drivers/dma/hsu/pci.c
+@@ -26,22 +26,12 @@
+ static irqreturn_t hsu_pci_irq(int irq, void *dev)
+ {
+ struct hsu_dma_chip *chip = dev;
+- struct pci_dev *pdev = to_pci_dev(chip->dev);
+ u32 dmaisr;
+ u32 status;
+ unsigned short i;
+ int ret = 0;
+ int err;
+
+- /*
+- * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
+- * to have different numbers, is shared between HSU DMA and UART IPs.
+- * Thus on such SoCs we are expecting that IRQ handler is called in
+- * UART driver only.
+- */
+- if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
+- return IRQ_HANDLED;
+-
+ dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
+ for (i = 0; i < chip->hsu->nr_channels; i++) {
+ if (dmaisr & 0x1) {
+@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (ret)
+ goto err_register_irq;
+
++ /*
++ * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
++ * to have different numbers, is shared between HSU DMA and UART IPs.
++ * Thus on such SoCs we are expecting that IRQ handler is called in
++ * UART driver only. Instead of handling the spurious interrupt
++ * from HSU DMA here and waste CPU time and delay HSU UART interrupt
++ * handling, disable the interrupt entirely.
++ */
++ if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
++ disable_irq_nosync(chip->irq);
++
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
+index 71fd6e4c42cd7..a15e50126434e 100644
+--- a/drivers/dma/idxd/dma.c
++++ b/drivers/dma/idxd/dma.c
+@@ -165,6 +165,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
+ INIT_LIST_HEAD(&dma->channels);
+ dma->dev = &idxd->pdev->dev;
+
++ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+ dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
+ dma->device_release = idxd_dma_release;
+
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index 9fede32641e9e..04202d75f4eed 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -1245,6 +1245,7 @@ static int owl_dma_remove(struct platform_device *pdev)
+ owl_dma_free(od);
+
+ clk_disable_unprepare(od->clk);
++ dma_pool_destroy(od->lli_pool);
+
+ return 0;
+ }
+diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
+index 1a0bf6b0567a5..e48eb397f433d 100644
+--- a/drivers/dma/qcom/gpi.c
++++ b/drivers/dma/qcom/gpi.c
+@@ -584,7 +584,7 @@ static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
+ gpi_write_reg(gpii, addr, val);
+ }
+
+-static inline void
++static __always_inline void
+ gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
+ {
+ void __iomem *addr = gpii->regs + offset;
+diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
+index f474a12323354..46bc1a419bdfb 100644
+--- a/drivers/dma/ti/k3-udma.c
++++ b/drivers/dma/ti/k3-udma.c
+@@ -4306,6 +4306,7 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
++ ud->rflow_cnt = ud->rchan_cnt;
+ break;
+ case DMA_TYPE_PKTDMA:
+ cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 5392e1fc6b4ef..cacdf1589b101 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -848,8 +848,6 @@ static int scmi_remove(struct platform_device *pdev)
+ struct scmi_info *info = platform_get_drvdata(pdev);
+ struct idr *idr = &info->tx_idr;
+
+- scmi_notification_exit(&info->handle);
+-
+ mutex_lock(&scmi_list_mutex);
+ if (info->users)
+ ret = -EBUSY;
+@@ -860,6 +858,8 @@ static int scmi_remove(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
++ scmi_notification_exit(&info->handle);
++
+ /* Safe to free channels since no more users */
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
+ idr_destroy(&info->tx_idr);
+diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
+index a2a8d155c75e3..b7568ee33696d 100644
+--- a/drivers/gpio/gpio-pcf857x.c
++++ b/drivers/gpio/gpio-pcf857x.c
+@@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
+ * reset state. Otherwise it flags pins to be driven low.
+ */
+ gpio->out = ~n_latch;
+- gpio->status = gpio->out;
++ gpio->status = gpio->read(gpio->client);
+
+ /* Enable irqchip if we have an interrupt */
+ if (client->irq) {
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 0973f408d75fe..af6c6d214d916 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -15,6 +15,9 @@ menuconfig DRM
+ select I2C_ALGOBIT
+ select DMA_SHARED_BUFFER
+ select SYNC_FILE
++# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
++# device and dmabuf fd. Let's make sure that is available for our userspace.
++ select KCMP
+ help
+ Kernel-level support for the Direct Rendering Infrastructure (DRI)
+ introduced in XFree86 4.0. If you say Y here, you need to select
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5993dd0fdd8e7..37fb846af4888 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1003,6 +1003,12 @@ struct amdgpu_device {
+ bool in_suspend;
+ bool in_hibernate;
+
++ /*
++ * The combination flag in_poweroff_reboot_com used to identify the poweroff
++ * and reboot opt in the s0i3 system-wide suspend.
++ */
++ bool in_poweroff_reboot_com;
++
+ atomic_t in_gpu_reset;
+ enum pp_mp1_state mp1_state;
+ struct rw_semaphore reset_sem;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index cab1ebaf6d629..bc5b644ddda34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2666,7 +2666,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+ {
+ int i, r;
+
+- if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
++ if (adev->in_poweroff_reboot_com ||
++ !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ }
+@@ -3726,7 +3727,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+
+ amdgpu_fence_driver_suspend(adev);
+
+- if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
++ if (adev->in_poweroff_reboot_com ||
++ !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+ r = amdgpu_device_ip_suspend_phase2(adev);
+ else
+ amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 7169fb5e3d9c4..0ffea970d0179 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1266,7 +1266,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
+ */
+ if (!amdgpu_passthrough(adev))
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
++ adev->in_poweroff_reboot_com = true;
+ amdgpu_device_ip_suspend(adev);
++ adev->in_poweroff_reboot_com = false;
+ adev->mp1_state = PP_MP1_STATE_NONE;
+ }
+
+@@ -1308,8 +1310,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
+ static int amdgpu_pmops_poweroff(struct device *dev)
+ {
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct amdgpu_device *adev = drm_to_adev(drm_dev);
++ int r;
+
+- return amdgpu_device_suspend(drm_dev, true);
++ adev->in_poweroff_reboot_com = true;
++ r = amdgpu_device_suspend(drm_dev, true);
++ adev->in_poweroff_reboot_com = false;
++ return r;
+ }
+
+ static int amdgpu_pmops_restore(struct device *dev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 82e952696d24f..1fb2a91ad30ad 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
++ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 6752d8b131188..ce8dc995c10cf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -21,7 +21,7 @@
+ *
+ */
+
+-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _AMDGPU_TRACE_H_
+
+ #include <linux/stringify.h>
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index d86b42a365601..e7d6da05011ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -71,6 +71,11 @@
+ #define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+ #define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+
++#define mmCGTS_TCC_DISABLE_gc_10_3 0x5006
++#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX 1
++#define mmCGTS_USER_TCC_DISABLE_gc_10_3 0x5007
++#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX 1
++
+ #define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55
+ #define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0
+ #define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0
+@@ -99,10 +104,6 @@
+ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
+ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
+
+-#define mmCGTS_TCC_DISABLE_Vangogh 0x5006
+-#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1
+-#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007
+-#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1
+ #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
+ #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
+ #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
+@@ -4942,15 +4943,12 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+ /* TCCs are global (not instanced). */
+ uint32_t tcc_disable;
+
+- switch (adev->asic_type) {
+- case CHIP_VANGOGH:
+- tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
+- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
+- break;
+- default:
++ if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
++ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
++ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
++ } else {
+ tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+- RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+- break;
++ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+ }
+
+ adev->gfx.config.tcc_disabled_mask =
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 0b3516c4eefb3..b2a93d8010828 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -241,6 +241,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
++ if (adev->asic_type == CHIP_RENOIR)
++ return 10000;
+ if (adev->asic_type == CHIP_RAVEN)
+ return reference_clock / 4;
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+index 16262e5d93f5c..7351dd195274e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+@@ -243,11 +243,11 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
+ static inline void dqm_lock(struct device_queue_manager *dqm)
+ {
+ mutex_lock(&dqm->lock_hidden);
+- dqm->saved_flags = memalloc_nofs_save();
++ dqm->saved_flags = memalloc_noreclaim_save();
+ }
+ static inline void dqm_unlock(struct device_queue_manager *dqm)
+ {
+- memalloc_nofs_restore(dqm->saved_flags);
++ memalloc_noreclaim_restore(dqm->saved_flags);
+ mutex_unlock(&dqm->lock_hidden);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 961abf1cf040c..947cd923fb4c3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1131,7 +1131,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->dm.hdcp_workqueue) {
+- hdcp_destroy(adev->dm.hdcp_workqueue);
++ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+@@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ dc_commit_updates_for_stream(
+ dm->dc, bundle->surface_updates,
+ dc_state->stream_status->plane_count,
+- dc_state->streams[k], &bundle->stream_update);
++ dc_state->streams[k], &bundle->stream_update, dc_state);
+ }
+
+ cleanup:
+@@ -1965,7 +1965,8 @@ static void dm_set_dpms_off(struct dc_link *link)
+
+ stream_update.stream = stream_state;
+ dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
+- stream_state, &stream_update);
++ stream_state, &stream_update,
++ stream_state->ctx->dc->current_state);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+@@ -7548,7 +7549,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_crtc *pcrtc,
+ bool wait_for_vblank)
+ {
+- int i;
++ uint32_t i;
+ uint64_t timestamp_ns;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+@@ -7589,7 +7590,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ amdgpu_dm_commit_cursors(state);
+
+ /* update planes when needed */
+- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
++ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+@@ -7812,7 +7813,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ bundle->surface_updates,
+ planes_count,
+ acrtc_state->stream,
+- &bundle->stream_update);
++ &bundle->stream_update,
++ dc_state);
+
+ /**
+ * Enable or disable the interrupts on the backend.
+@@ -8148,13 +8150,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+- struct dc_surface_update surface_updates[MAX_SURFACES];
++ struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
+ struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
+
+- memset(&surface_updates, 0, sizeof(surface_updates));
++ memset(&dummy_updates, 0, sizeof(dummy_updates));
+ memset(&stream_update, 0, sizeof(stream_update));
+
+ if (acrtc) {
+@@ -8211,15 +8213,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ * To fix this, DC should permit updating only stream properties.
+ */
+ for (j = 0; j < status->plane_count; j++)
+- surface_updates[j].surface = status->plane_states[j];
++ dummy_updates[j].surface = status->plane_states[0];
+
+
+ mutex_lock(&dm->dc_lock);
+ dc_commit_updates_for_stream(dm->dc,
+- surface_updates,
++ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+- &stream_update);
++ &stream_update,
++ dc_state);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index c2cd184f0bbd4..79de68ac03f20 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -376,7 +376,7 @@ static void event_cpirq(struct work_struct *work)
+ }
+
+
+-void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
++void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
+ {
+ int i = 0;
+
+@@ -385,6 +385,7 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+ cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ }
+
++ sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
+ kfree(hdcp_work);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+index 5159b3a5e5b03..09294ff122fea 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+@@ -69,7 +69,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
+
+ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+ void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+-void hdcp_destroy(struct hdcp_workqueue *work);
++void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);
+
+ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index 070459e3e4070..afc10b954ffa7 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
+ cntl->enable_dp_audio);
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
++ switch (cntl->color_depth) {
++ case COLOR_DEPTH_888:
++ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_101010:
++ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_121212:
++ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_161616:
++ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
++ break;
++ default:
++ break;
++ }
++
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
++ switch (cntl->color_depth) {
++ case COLOR_DEPTH_888:
++ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_101010:
++ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_121212:
++ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_161616:
++ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
++ break;
++ default:
++ break;
++ }
++
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
+ * driver choose program it itself, i.e. here we program it
+ * to 888 by default.
+ */
++ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
++ switch (bp_params->color_depth) {
++ case TRANSMITTER_COLOR_DEPTH_30:
++ /* yes this is correct, the atom define is wrong */
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
++ break;
++ case TRANSMITTER_COLOR_DEPTH_36:
++ /* yes this is correct, the atom define is wrong */
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++ break;
++ default:
++ break;
++ }
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
+ * driver choose program it itself, i.e. here we pass required
+ * target rate that includes deep color.
+ */
++ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
++ switch (bp_params->color_depth) {
++ case TRANSMITTER_COLOR_DEPTH_30:
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
++ break;
++ case TRANSMITTER_COLOR_DEPTH_36:
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
++ break;
++ case TRANSMITTER_COLOR_DEPTH_48:
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++ break;
++ default:
++ break;
++ }
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 6cf1a5a2a5ecc..58eb0d69873a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2679,7 +2679,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+- struct dc_stream_update *stream_update)
++ struct dc_stream_update *stream_update,
++ struct dc_state *state)
+ {
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+@@ -2698,12 +2699,6 @@ void dc_commit_updates_for_stream(struct dc *dc,
+
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+- struct dc_plane_state *new_planes[MAX_SURFACES];
+-
+- memset(new_planes, 0, sizeof(new_planes));
+-
+- for (i = 0; i < surface_count; i++)
+- new_planes[i] = srf_updates[i].surface;
+
+ /* initialize scratch memory for building context */
+ context = dc_create_state(dc);
+@@ -2712,21 +2707,15 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ return;
+ }
+
+- dc_resource_state_copy_construct(
+- dc->current_state, context);
++ dc_resource_state_copy_construct(state, context);
+
+- /*remove old surfaces from context */
+- if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+- DC_ERROR("Failed to remove streams for new validate context!\n");
+- return;
+- }
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+- /* add surface to context */
+- if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+- DC_ERROR("Failed to add streams for new validate context!\n");
+- return;
++ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
++ new_pipe->plane_state->force_full_update = true;
+ }
+-
+ }
+
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index e243c01b9672e..b7910976b81a7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -283,7 +283,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+- struct dc_stream_update *stream_update);
++ struct dc_stream_update *stream_update,
++ struct dc_state *state);
+ /*
+ * Log the current stream state.
+ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index fb733f573715e..466f8f5803c9c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -871,6 +871,20 @@ static bool dce110_program_pix_clk(
+ bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+
++ switch (pix_clk_params->color_depth) {
++ case COLOR_DEPTH_101010:
++ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
++ break;
++ case COLOR_DEPTH_121212:
++ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
++ break;
++ case COLOR_DEPTH_161616:
++ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
++ break;
++ default:
++ break;
++ }
++
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index ada57f745fd76..19e380e0a3301 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -564,6 +564,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
++ cntl.color_depth = crtc_timing->display_color_depth;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index 130a0a0c83329..68028ec995e74 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -601,12 +601,12 @@ static void set_clamp(
+ clamp_max = 0x3FC0;
+ break;
+ case COLOR_DEPTH_101010:
+- /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+- clamp_max = 0x3FFC;
++ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
++ clamp_max = 0x3FF0;
+ break;
+ case COLOR_DEPTH_121212:
+- /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+- clamp_max = 0x3FFF;
++ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
++ clamp_max = 0x3FFC;
+ break;
+ default:
+ clamp_max = 0x3FC0;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+index 81db0179f7ea8..85dc2b16c9418 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+@@ -480,7 +480,6 @@ unsigned int dcn10_get_dig_frontend(struct link_encoder *enc)
+ break;
+ default:
+ // invalid source select DIG
+- ASSERT(false);
+ result = ENGINE_ID_UNKNOWN;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index d6b4885618713..354c2a2702d79 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -408,8 +408,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ },
+ },
+ .num_states = 5,
+- .sr_exit_time_us = 11.6,
+- .sr_enter_plus_exit_time_us = 13.9,
++ .sr_exit_time_us = 8.6,
++ .sr_enter_plus_exit_time_us = 10.9,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+@@ -3245,7 +3245,7 @@ restore_dml_state:
+ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+ {
+- bool voltage_supported = false;
++ bool voltage_supported;
+ DC_FP_START();
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
+ DC_FP_END();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 6743764289167..072f8c8809243 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -1329,8 +1329,8 @@ validate_out:
+ return out;
+ }
+
+-bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
+- bool fast_validate)
++static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
++ struct dc_state *context, bool fast_validate)
+ {
+ bool out = false;
+
+@@ -1383,6 +1383,22 @@ validate_out:
+
+ return out;
+ }
++
++/*
++ * Some of the functions further below use the FPU, so we need to wrap this
++ * with DC_FP_START()/DC_FP_END(). Use the same approach as for
++ * dcn20_validate_bandwidth in dcn20_resource.c.
++ */
++bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,
++ bool fast_validate)
++{
++ bool voltage_supported;
++ DC_FP_START();
++ voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate);
++ DC_FP_END();
++ return voltage_supported;
++}
++
+ static void dcn21_destroy_resource_pool(struct resource_pool **pool)
+ {
+ struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 3deb3fb1724dc..0631c16f9aff8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -539,6 +539,8 @@ void dcn30_init_hw(struct dc *dc)
+
+ fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
++ if (fe == ENGINE_ID_UNKNOWN)
++ continue;
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+index 1b971265418b6..0e0f494fbb5e1 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .ack = NULL
+ };
+
++static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
++ .set = NULL,
++ .ack = NULL
++};
++
+ #undef BASE_INNER
+ #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+@@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .funcs = &vblank_irq_info_funcs\
+ }
+
++/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
++ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
++ */
++#define vupdate_no_lock_int_entry(reg_num)\
++ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
++ IRQ_REG_ENTRY(OTG, reg_num,\
++ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
++ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
++ .funcs = &vupdate_no_lock_irq_info_funcs\
++ }
++
+ #define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+@@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
++ vupdate_no_lock_int_entry(0),
++ vupdate_no_lock_int_entry(1),
++ vupdate_no_lock_int_entry(2),
++ vupdate_no_lock_int_entry(3),
++ vupdate_no_lock_int_entry(4),
++ vupdate_no_lock_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+index 7b6ef05a1d35a..0b5be50b2eeeb 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+@@ -1074,7 +1074,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ {
+ int ret;
+- long level;
++ unsigned long level;
+ char *sub_str = NULL;
+ char *tmp;
+ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+@@ -1090,8 +1090,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+- ret = kstrtol(sub_str, 0, &level);
+- if (ret)
++ ret = kstrtoul(sub_str, 0, &level);
++ if (ret || level > 31)
+ return -EINVAL;
+ *mask |= 1 << level;
+ } else
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index b11c0522a4410..405501c74e400 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2302,7 +2302,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ }
+
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+- drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
++ drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
++ port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
+ drm_connector_set_tile_property(port->connector);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 4b81195106875..e82db0f4e7715 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -946,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+ drm_modeset_lock_all(fb_helper->dev);
+ drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ crtc = modeset->crtc;
+- if (!crtc->funcs->gamma_set || !crtc->gamma_size)
+- return -EINVAL;
++ if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- if (cmap->start + cmap->len > crtc->gamma_size)
+- return -EINVAL;
++ if (cmap->start + cmap->len > crtc->gamma_size) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+@@ -963,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+ ret = crtc->funcs->gamma_set(crtc, r, g, b,
+ crtc->gamma_size, NULL);
+ if (ret)
+- return ret;
++ goto out;
+ }
++out:
+ drm_modeset_unlock_all(fb_helper->dev);
+
+ return ret;
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 33fb2f05ce662..1ac67d4505e07 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ if (mode->htotal == 0 || mode->vtotal == 0)
+ return 0;
+
+- num = mode->clock * 1000;
++ num = mode->clock;
+ den = mode->htotal * mode->vtotal;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+@@ -772,7 +772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
+ if (mode->vscan > 1)
+ den *= mode->vscan;
+
+- return DIV_ROUND_CLOSEST(num, den);
++ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
+ }
+ EXPORT_SYMBOL(drm_mode_vrefresh);
+
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+index e281070611480..fc9a34ed58bd1 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+ hdmi_dev = pci_get_drvdata(dev);
+
+ i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+- if (i2c_dev == NULL) {
+- DRM_ERROR("Can't allocate interface\n");
+- ret = -ENOMEM;
+- goto exit;
+- }
++ if (!i2c_dev)
++ return -ENOMEM;
+
+ i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+ i2c_dev->status = I2C_STAT_INIT;
+@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+ oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+ if (ret) {
+ DRM_ERROR("Failed to request IRQ for I2C controller\n");
+- goto err;
++ goto free_dev;
+ }
+
+ /* Adapter registration */
+ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+- return ret;
++ if (ret) {
++ DRM_ERROR("Failed to add I2C adapter\n");
++ goto free_irq;
++ }
+
+-err:
++ return 0;
++
++free_irq:
++ free_irq(dev->irq, hdmi_dev);
++free_dev:
+ kfree(i2c_dev);
+-exit:
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index cc2d59e8471da..134068f9328d5 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -312,6 +312,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
+ if (ret)
+ goto out_err;
+
++ ret = -ENOMEM;
++
+ dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
+ if (!dev_priv->mmu)
+ goto out_err;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index 82674a8853c60..2fa9ba36eeaa3 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -2216,7 +2216,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+ has_hdmi_sink))
+ return MODE_CLOCK_HIGH;
+
+- /* BXT DPLL can't generate 223-240 MHz */
++ /* GLK DPLL can't generate 446-480 MHz */
++ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
++ return MODE_CLOCK_RANGE;
++
++ /* BXT/GLK DPLL can't generate 223-240 MHz */
+ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+index e961ad6a31294..4adbc2bba97fb 100644
+--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
++++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+@@ -240,7 +240,7 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+- *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
++ *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+@@ -353,19 +353,21 @@ static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+
+ static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+ {
+- u32 *cs = batch_alloc_items(batch, 0, 8);
++ u32 *cs = batch_alloc_items(batch, 0, 10);
+
+ /* ivb: Stall before STATE_CACHE_INVALIDATE */
+- *cs++ = GFX_OP_PIPE_CONTROL(4);
++ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = 0;
+ *cs++ = 0;
++ *cs++ = 0;
+
+- *cs++ = GFX_OP_PIPE_CONTROL(4);
++ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ *cs++ = 0;
+ *cs++ = 0;
++ *cs++ = 0;
+
+ batch_advance(batch, cs);
+ }
+@@ -391,12 +393,14 @@ static void emit_batch(struct i915_vma * const vma,
+ desc_count);
+
+ /* Reset inherited context registers */
++ gen7_emit_pipeline_flush(&cmds);
+ gen7_emit_pipeline_invalidate(&cmds);
+ batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+ batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+ batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
++ gen7_emit_pipeline_invalidate(&cmds);
+ gen7_emit_pipeline_flush(&cmds);
+
+ /* Switch to the media pipeline and our base address */
+diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
+index 63b4c5643f9cd..5cc20b403a252 100644
+--- a/drivers/gpu/drm/lima/lima_sched.c
++++ b/drivers/gpu/drm/lima/lima_sched.c
+@@ -201,7 +201,7 @@ static int lima_pm_busy(struct lima_device *ldev)
+ int ret;
+
+ /* resume GPU if it has been suspended by runtime PM */
+- ret = pm_runtime_get_sync(ldev->dev);
++ ret = pm_runtime_resume_and_get(ldev->dev);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+index 74ef6fc0528b6..523716e3c278a 100644
+--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+@@ -267,7 +267,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ }
+
+ con = ovl_fmt_convert(ovl, fmt);
+- if (state->base.fb->format->has_alpha)
++ if (state->base.fb && state->base.fb->format->has_alpha)
+ con |= OVL_CON_AEN | OVL_CON_ALPHA;
+
+ if (pending->rotation & DRM_MODE_REFLECT_Y) {
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index e6703ae987608..b3318f86aabc0 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -264,6 +264,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ }
+ name = "GPU_SET";
+ break;
++ case GMU_OOB_PERFCOUNTER_SET:
++ if (gmu->legacy) {
++ request = GMU_OOB_PERFCOUNTER_REQUEST;
++ ack = GMU_OOB_PERFCOUNTER_ACK;
++ } else {
++ request = GMU_OOB_PERFCOUNTER_REQUEST_NEW;
++ ack = GMU_OOB_PERFCOUNTER_ACK_NEW;
++ }
++ name = "PERFCOUNTER";
++ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ request = GMU_OOB_BOOT_SLUMBER_REQUEST;
+ ack = GMU_OOB_BOOT_SLUMBER_ACK;
+@@ -301,9 +311,14 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ {
+ if (!gmu->legacy) {
+- WARN_ON(state != GMU_OOB_GPU_SET);
+- gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+- 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
++ if (state == GMU_OOB_GPU_SET) {
++ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
++ 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
++ } else {
++ WARN_ON(state != GMU_OOB_PERFCOUNTER_SET);
++ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
++ 1 << GMU_OOB_PERFCOUNTER_CLEAR_NEW);
++ }
+ return;
+ }
+
+@@ -312,6 +327,10 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR);
+ break;
++ case GMU_OOB_PERFCOUNTER_SET:
++ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
++ 1 << GMU_OOB_PERFCOUNTER_CLEAR);
++ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+index c6d2bced8e5de..9fa278de2106a 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+@@ -156,6 +156,7 @@ enum a6xx_gmu_oob_state {
+ GMU_OOB_BOOT_SLUMBER = 0,
+ GMU_OOB_GPU_SET,
+ GMU_OOB_DCVS_SET,
++ GMU_OOB_PERFCOUNTER_SET,
+ };
+
+ /* These are the interrupt / ack bits for each OOB request that are set
+@@ -190,6 +191,13 @@ enum a6xx_gmu_oob_state {
+ #define GMU_OOB_GPU_SET_ACK_NEW 31
+ #define GMU_OOB_GPU_SET_CLEAR_NEW 31
+
++#define GMU_OOB_PERFCOUNTER_REQUEST 17
++#define GMU_OOB_PERFCOUNTER_ACK 25
++#define GMU_OOB_PERFCOUNTER_CLEAR 25
++
++#define GMU_OOB_PERFCOUNTER_REQUEST_NEW 28
++#define GMU_OOB_PERFCOUNTER_ACK_NEW 30
++#define GMU_OOB_PERFCOUNTER_CLEAR_NEW 30
+
+ void a6xx_hfi_init(struct a6xx_gmu *gmu);
+ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 130661898546a..0366419d8bfed 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1117,7 +1117,7 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+ a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+
+- if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
++ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
+ }
+
+@@ -1169,14 +1169,18 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
++ static DEFINE_MUTEX(perfcounter_oob);
++
++ mutex_lock(&perfcounter_oob);
+
+ /* Force the GPU power on so we can read this register */
+- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
++ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+
+- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
++ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
++ mutex_unlock(&perfcounter_oob);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index f09175698827a..b35914de1b275 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -200,15 +200,15 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ if (!iommu)
+ return NULL;
+
+-
+ if (adreno_is_a6xx(adreno_gpu)) {
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct io_pgtable_domain_attr pgtbl_cfg;
++
+ /*
+- * This allows GPU to set the bus attributes required to use system
+- * cache on behalf of the iommu page table walker.
+- */
+- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
++ * This allows GPU to set the bus attributes required to use system
++ * cache on behalf of the iommu page table walker.
++ */
++ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) {
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ }
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index 0c8f9f88301fa..f5d71b2740793 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1180,7 +1180,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+ pp_done);
+
+- complete(&mdp5_crtc->pp_completion);
++ complete_all(&mdp5_crtc->pp_completion);
+ }
+
+ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+index e3462f5d96d75..6cbe10af0a7af 100644
+--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
++++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
+@@ -631,7 +631,7 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
+
+ tu = kzalloc(sizeof(*tu), GFP_KERNEL);
+ if (!tu)
+- return
++ return;
+
+ dp_panel_update_tu_timings(in, tu);
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 3bc7ed21de286..81f6794a25100 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -651,8 +651,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+ dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+
+ /* signal the disconnect event early to ensure proper teardown */
+- dp_display_handle_plugged_change(g_dp_display, false);
+ reinit_completion(&dp->audio_comp);
++ dp_display_handle_plugged_change(g_dp_display, false);
+
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+ DP_DP_IRQ_HPD_INT_MASK, true);
+@@ -890,6 +890,9 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+
+ /* wait only if audio was enabled */
+ if (dp_display->audio_enabled) {
++ /* signal the disconnect event */
++ reinit_completion(&dp->audio_comp);
++ dp_display_handle_plugged_change(dp_display, false);
+ if (!wait_for_completion_timeout(&dp->audio_comp,
+ HZ * 5))
+ DRM_ERROR("audio comp timeout\n");
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+index 1afb7c579dbbb..eca86bf448f74 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ .disable = dsi_20nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
+ },
+- .io_start = { 0xfd998300, 0xfd9a0300 },
++ .io_start = { 0xfd998500, 0xfd9a0500 },
+ .num_dsi_phy = 2,
+ };
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 108c405e03dd9..94525ac76d4e6 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -788,9 +788,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
+ struct drm_file *file, struct drm_gem_object *obj,
+ uint64_t *iova)
+ {
++ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+
+- if (!ctx->aspace)
++ if (!priv->gpu)
+ return -EINVAL;
+
+ /*
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index d04c349d8112a..5480852bdedaf 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -198,6 +198,8 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
+ submit->cmd[i].idx = submit_cmd.submit_idx;
+ submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+
++ userptr = u64_to_user_ptr(submit_cmd.relocs);
++
+ sz = array_size(submit_cmd.nr_relocs,
+ sizeof(struct drm_msm_gem_submit_reloc));
+ /* check for overflow: */
+diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
+index d8151a89e1631..4735251a394d8 100644
+--- a/drivers/gpu/drm/msm/msm_kms.h
++++ b/drivers/gpu/drm/msm/msm_kms.h
+@@ -157,6 +157,7 @@ struct msm_kms {
+ * from the crtc's pending_timer close to end of the frame:
+ */
+ struct mutex commit_lock[MAX_CRTCS];
++ struct lock_class_key commit_lock_keys[MAX_CRTCS];
+ unsigned pending_crtc_mask;
+ struct msm_pending_timer pending_timers[MAX_CRTCS];
+ };
+@@ -166,8 +167,11 @@ static inline int msm_kms_init(struct msm_kms *kms,
+ {
+ unsigned i, ret;
+
+- for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+- mutex_init(&kms->commit_lock[i]);
++ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
++ lockdep_register_key(&kms->commit_lock_keys[i]);
++ __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
++ &kms->commit_lock_keys[i]);
++ }
+
+ kms->funcs = funcs;
+
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+index f5f59261ea819..d1beaad0c82b6 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+@@ -14,6 +14,7 @@ enum dcb_connector_type {
+ DCB_CONNECTOR_LVDS_SPWG = 0x41,
+ DCB_CONNECTOR_DP = 0x46,
+ DCB_CONNECTOR_eDP = 0x47,
++ DCB_CONNECTOR_mDP = 0x48,
+ DCB_CONNECTOR_HDMI_0 = 0x60,
+ DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_HDMI_C = 0x63,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
+index 5d191e58edf11..e48f1f7eb3705 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
++++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
+@@ -533,6 +533,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ if (ret) {
+ NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
+ nouveau_channel_del(pchan);
++ goto done;
+ }
+
+ ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 8b4b3688c7ae3..4c992fd5bd68a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1210,6 +1210,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
+ case DCB_CONNECTOR_DP :
++ case DCB_CONNECTOR_mDP :
+ case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
+ case DCB_CONNECTOR_HDMI_0 :
+diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+index bc36aa3c11234..fe5ac3ef90185 100644
+--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
++++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+@@ -265,7 +265,8 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+- MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
++ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET |
++ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+index 0c5f22e95c2db..624d17b96a693 100644
+--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
++++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+@@ -22,6 +22,7 @@
+ /* Manufacturer specific Commands send via DSI */
+ #define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41
+ #define MANTIX_CMD_INT_CANCEL 0x4C
++#define MANTIX_CMD_SPI_FINISH 0x90
+
+ struct mantix {
+ struct device *dev;
+@@ -66,6 +67,10 @@ static int mantix_init_sequence(struct mantix *ctx)
+ dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ msleep(20);
+
++ dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
++ dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
++ msleep(20);
++
+ dev_dbg(dev, "Panel init sequence done\n");
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+index 6b4e97bfd46ee..603c5dfe87682 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+@@ -25,6 +25,14 @@
+ /* Manufacturer Command Set */
+ #define MCS_ELVSS_ON 0xb1
+ #define MCS_TEMP_SWIRE 0xb2
++#define MCS_PENTILE_1 0xb3
++#define MCS_PENTILE_2 0xb4
++#define MCS_GAMMA_DELTA_Y_RED 0xb5
++#define MCS_GAMMA_DELTA_X_RED 0xb6
++#define MCS_GAMMA_DELTA_Y_GREEN 0xb7
++#define MCS_GAMMA_DELTA_X_GREEN 0xb8
++#define MCS_GAMMA_DELTA_Y_BLUE 0xb9
++#define MCS_GAMMA_DELTA_X_BLUE 0xba
+ #define MCS_MIECTL1 0xc0
+ #define MCS_BCMODE 0xc1
+ #define MCS_ERROR_CHECK 0xd5
+@@ -281,6 +289,7 @@ struct s6e63m0 {
+ struct backlight_device *bl_dev;
+ u8 lcd_type;
+ u8 elvss_pulse;
++ bool dsi_mode;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+@@ -395,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
+
+ static void s6e63m0_init(struct s6e63m0 *ctx)
+ {
+- s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
+- 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
+- 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
++ /*
++ * We do not know why there is a difference in the DSI mode.
++ * (No datasheet.)
++ *
++ * In the vendor driver this sequence is called
++ * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET".
++ */
++ if (ctx->dsi_mode)
++ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
++ 0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3,
++ 0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00);
++ else
++ s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
++ 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
++ 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
+ 0x02, 0x03, 0x1c, 0x10, 0x10);
+@@ -414,40 +435,40 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
+
+ s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
+ 0x00, 0x8e, 0x07);
+- s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
++ s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c);
+
+- s6e63m0_dcs_write_seq_static(ctx, 0xb5,
++ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED,
+ 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 0x21, 0x20, 0x1e, 0x1e);
+
+- s6e63m0_dcs_write_seq_static(ctx, 0xb6,
++ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
+
+- s6e63m0_dcs_write_seq_static(ctx, 0xb7,
++ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN,
+ 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 0x21, 0x20, 0x1e, 0x1e);
+
+- s6e63m0_dcs_write_seq_static(ctx, 0xb8,
++ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
+
+- s6e63m0_dcs_write_seq_static(ctx, 0xb9,
++ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE,
+ 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
+ 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
+ 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
+ 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
+ 0x21, 0x20, 0x1e, 0x1e);
+
+- s6e63m0_dcs_write_seq_static(ctx, 0xba,
++ s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
+@@ -671,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = {
+ .update_status = s6e63m0_set_brightness,
+ };
+
+-static int s6e63m0_backlight_register(struct s6e63m0 *ctx)
++static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness)
+ {
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+- .brightness = MAX_BRIGHTNESS,
+- .max_brightness = MAX_BRIGHTNESS
++ .brightness = max_brightness,
++ .max_brightness = max_brightness,
+ };
+ struct device *dev = ctx->dev;
+ int ret = 0;
+@@ -698,12 +719,14 @@ int s6e63m0_probe(struct device *dev,
+ bool dsi_mode)
+ {
+ struct s6e63m0 *ctx;
++ u32 max_brightness;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
++ ctx->dsi_mode = dsi_mode;
+ ctx->dcs_read = dcs_read;
+ ctx->dcs_write = dcs_write;
+ dev_set_drvdata(dev, ctx);
+@@ -712,6 +735,14 @@ int s6e63m0_probe(struct device *dev,
+ ctx->enabled = false;
+ ctx->prepared = false;
+
++ ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
++ if (ret)
++ max_brightness = MAX_BRIGHTNESS;
++ if (max_brightness > MAX_BRIGHTNESS) {
++ dev_err(dev, "illegal max brightness specified\n");
++ max_brightness = MAX_BRIGHTNESS;
++ }
++
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+@@ -731,7 +762,7 @@ int s6e63m0_probe(struct device *dev,
+ dsi_mode ? DRM_MODE_CONNECTOR_DSI :
+ DRM_MODE_CONNECTOR_DPI);
+
+- ret = s6e63m0_backlight_register(ctx);
++ ret = s6e63m0_backlight_register(ctx, max_brightness);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c
+index c578095b09a53..382d53f8a22e8 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_cmm.c
++++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c
+@@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev)
+ {
+ int ret;
+
+- ret = pm_runtime_get_sync(&pdev->dev);
++ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index b5fb941e0f534..e23b9c7b4afeb 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
+ */
+ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
+ rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+- struct rcar_du_encoder *encoder =
+- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
++ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
+ const struct drm_display_mode *mode =
+ &crtc->state->adjusted_mode;
+- struct drm_bridge *bridge;
+
+- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ rcar_lvds_clk_enable(bridge, mode->clock * 1000);
+ }
+
+@@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
+
+ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
+ rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
+- struct rcar_du_encoder *encoder =
+- rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
+- struct drm_bridge *bridge;
++ struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
+
+ /*
+ * Disable the LVDS clock output, see
+ * rcar_du_crtc_atomic_enable().
+ */
+- bridge = drm_bridge_chain_get_first_bridge(&encoder->base);
+ rcar_lvds_clk_disable(bridge);
+ }
+
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index 61504c54e2ecf..3597a179bfb78 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -20,10 +20,10 @@
+
+ struct clk;
+ struct device;
++struct drm_bridge;
+ struct drm_device;
+ struct drm_property;
+ struct rcar_du_device;
+-struct rcar_du_encoder;
+
+ #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
+ #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
+@@ -71,6 +71,7 @@ struct rcar_du_device_info {
+ #define RCAR_DU_MAX_CRTCS 4
+ #define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
+ #define RCAR_DU_MAX_VSPS 4
++#define RCAR_DU_MAX_LVDS 2
+
+ struct rcar_du_device {
+ struct device *dev;
+@@ -83,11 +84,10 @@ struct rcar_du_device {
+ struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
+ unsigned int num_crtcs;
+
+- struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX];
+-
+ struct rcar_du_group groups[RCAR_DU_MAX_GROUPS];
+ struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
+ struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
++ struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
+
+ struct {
+ struct drm_property *colorkey;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+index b0335da0c1614..50fc14534fa4d 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+@@ -57,7 +57,6 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ if (renc == NULL)
+ return -ENOMEM;
+
+- rcdu->encoders[output] = renc;
+ renc->output = output;
+ encoder = rcar_encoder_to_drm_encoder(renc);
+
+@@ -91,6 +90,10 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ ret = -EPROBE_DEFER;
+ goto done;
+ }
++
++ if (output == RCAR_DU_OUTPUT_LVDS0 ||
++ output == RCAR_DU_OUTPUT_LVDS1)
++ rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+index 72dda446355fe..7015e22872bbe 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+@@ -700,10 +700,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+ int ret;
+
+ cmm = of_parse_phandle(np, "renesas,cmms", i);
+- if (IS_ERR(cmm)) {
++ if (!cmm) {
+ dev_err(rcdu->dev,
+ "Failed to parse 'renesas,cmms' property\n");
+- return PTR_ERR(cmm);
++ return -EINVAL;
+ }
+
+ if (!of_device_is_available(cmm)) {
+@@ -713,10 +713,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu)
+ }
+
+ pdev = of_find_device_by_node(cmm);
+- if (IS_ERR(pdev)) {
++ if (!pdev) {
+ dev_err(rcdu->dev, "No device found for CMM%u\n", i);
+ of_node_put(cmm);
+- return PTR_ERR(pdev);
++ return -EINVAL;
+ }
+
+ of_node_put(cmm);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+index 4a2099cb582e1..857d97cdc67c6 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+@@ -17,9 +17,20 @@
+
+ #define NUM_YUV2YUV_COEFFICIENTS 12
+
++/* AFBC supports a number of configurable modes. Relevant to us is block size
++ * (16x16 or 32x8), storage modifiers (SPARSE, SPLIT), and the YUV-like
++ * colourspace transform (YTR). 16x16 SPARSE mode is always used. SPLIT mode
++ * could be enabled via the hreg_block_split register, but is not currently
++ * handled. The colourspace transform is implicitly always assumed by the
++ * decoder, so consumers must use this transform as well.
++ *
++ * Failure to match modifiers will cause errors displaying AFBC buffers
++ * produced by conformant AFBC producers, including Mesa.
++ */
+ #define ROCKCHIP_AFBC_MOD \
+ DRM_FORMAT_MOD_ARM_AFBC( \
+ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
++ | AFBC_FORMAT_MOD_YTR \
+ )
+
+ enum vop_data_format {
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index b498d474ef9e4..864e423d6d2ba 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -891,6 +891,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
+ if (sched->thread)
+ kthread_stop(sched->thread);
+
++ /* Confirm no work left behind accessing device structures */
++ cancel_delayed_work_sync(&sched->work_tdr);
++
+ sched->ready = false;
+ }
+ EXPORT_SYMBOL(drm_sched_fini);
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index 1e643bc7e786a..9f06dec0fc61d 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+
+- /*
+- * On A20 and similar SoCs, the only way to achieve Positive Edge
+- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
+- * By default TCON works in Negative Edge(Falling Edge),
+- * this is why phase is set to 0 in that case.
+- * Unfortunately there's no way to logically invert dclk through
+- * IO_POL register.
+- * The only acceptable way to work, triple checked with scope,
+- * is using clock phase set to 0° for Negative Edge and set to 240°
+- * for Positive Edge.
+- * On A33 and similar SoCs there would be a 90° phase option,
+- * but it divides also dclk by 2.
+- * Following code is a way to avoid quirks all around TCON
+- * and DOTCLOCK drivers.
+- */
+- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+- clk_set_phase(tcon->dclk, 240);
+-
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+- clk_set_phase(tcon->dclk, 0);
++ val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
++ SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
+ SUN4I_TCON0_IO_POL_DE_NEGATIVE,
+ val);
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
+index ee555318e3c2f..e624f6977eb84 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
+@@ -113,6 +113,7 @@
+ #define SUN4I_TCON0_IO_POL_REG 0x88
+ #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
+ #define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
++#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
+ #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
+ #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
+
+diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
+index 85dd7131553af..0ae3a025efe9d 100644
+--- a/drivers/gpu/drm/tegra/dc.c
++++ b/drivers/gpu/drm/tegra/dc.c
+@@ -2186,7 +2186,7 @@ static int tegra_dc_runtime_resume(struct host1x_client *client)
+ struct device *dev = client->dev;
+ int err;
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
+index 5691ef1b0e586..f46d377f0c304 100644
+--- a/drivers/gpu/drm/tegra/dsi.c
++++ b/drivers/gpu/drm/tegra/dsi.c
+@@ -1111,7 +1111,7 @@ static int tegra_dsi_runtime_resume(struct host1x_client *client)
+ struct device *dev = client->dev;
+ int err;
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
+index d09a24931c87c..e5d2a40260288 100644
+--- a/drivers/gpu/drm/tegra/hdmi.c
++++ b/drivers/gpu/drm/tegra/hdmi.c
+@@ -1510,7 +1510,7 @@ static int tegra_hdmi_runtime_resume(struct host1x_client *client)
+ struct device *dev = client->dev;
+ int err;
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
+index 22a03f7ffdc12..5ce771cba1335 100644
+--- a/drivers/gpu/drm/tegra/hub.c
++++ b/drivers/gpu/drm/tegra/hub.c
+@@ -789,7 +789,7 @@ static int tegra_display_hub_runtime_resume(struct host1x_client *client)
+ unsigned int i;
+ int err;
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
+index cc2aa2308a515..f02a035dda453 100644
+--- a/drivers/gpu/drm/tegra/sor.c
++++ b/drivers/gpu/drm/tegra/sor.c
+@@ -3218,7 +3218,7 @@ static int tegra_sor_runtime_resume(struct host1x_client *client)
+ struct device *dev = client->dev;
+ int err;
+
+- err = pm_runtime_get_sync(dev);
++ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "failed to get runtime PM: %d\n", err);
+ return err;
+diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
+index ade56b860cf9d..b77f726303d89 100644
+--- a/drivers/gpu/drm/tegra/vic.c
++++ b/drivers/gpu/drm/tegra/vic.c
+@@ -314,7 +314,7 @@ static int vic_open_channel(struct tegra_drm_client *client,
+ struct vic *vic = to_vic(client);
+ int err;
+
+- err = pm_runtime_get_sync(vic->dev);
++ err = pm_runtime_resume_and_get(vic->dev);
+ if (err < 0)
+ return err;
+
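All of the pm_runtime_get_sync() conversions in this patch (the Tegra hunks above, and omap_ssi further down) fix the same leak: pm_runtime_get_sync() increments the device usage counter even when the resume fails, so a caller that simply returns on error leaks a reference. The replacement helper is approximately this (see include/linux/pm_runtime.h):

static inline int pm_runtime_resume_and_get(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);   /* always takes a usage reference */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);  /* drop the reference on failure */
                return ret;
        }

        return 0;
}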
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 9a03c7834b1ed..22073e77fdf9a 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -967,8 +967,10 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
+ return ret;
+ /* move to the bounce domain */
+ ret = ttm_bo_handle_move_mem(bo, &hop_mem, false, ctx, NULL);
+- if (ret)
++ if (ret) {
++ ttm_resource_free(bo, &hop_mem);
+ return ret;
++ }
+ return 0;
+ }
+
+@@ -1000,18 +1002,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ * stop and the driver will be called to make
+ * the second hop.
+ */
+-bounce:
+ ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
+ if (ret)
+ return ret;
++bounce:
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
+ if (ret == -EMULTIHOP) {
+ ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
+ if (ret)
+- return ret;
++ goto out;
+ /* try and move to final place now. */
+ goto bounce;
+ }
++out:
+ if (ret)
+ ttm_resource_free(bo, &mem);
+ return ret;
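Two leaks are closed here: a failed bounce move now frees hop_mem, and the bounce: label moves below ttm_bo_mem_space() so a multihop retry no longer allocates a second resource for the same buffer object. The fixed control flow, reduced to its shape (names as in the hunk above, details elided):

ret = ttm_bo_mem_space(bo, placement, &mem, ctx);  /* allocate once */
if (ret)
        return ret;
bounce:
ret = ttm_bo_handle_move_mem(bo, &mem, false, ctx, &hop);
if (ret == -EMULTIHOP) {
        ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
        if (ret)
                goto out;      /* free 'mem' below instead of leaking it */
        goto bounce;           /* second hop: retry the move, no re-allocation */
}
out:
if (ret)
        ttm_resource_free(bo, &mem);
return ret;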
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 98cab0bbe92d8..a9f494590c578 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -119,24 +119,57 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
+ HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
+ }
+
++#ifdef CONFIG_DRM_VC4_HDMI_CEC
++static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
++{
++ u16 clk_cnt;
++ u32 value;
++
++ value = HDMI_READ(HDMI_CEC_CNTRL_1);
++ value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
++
++ /*
++ * Set the clock divider: the hsm_clock rate and this divider
++ * setting will give a 40 kHz CEC clock.
++ */
++ clk_cnt = clk_get_rate(vc4_hdmi->hsm_clock) / CEC_CLOCK_FREQ;
++ value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT;
++ HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
++}
++#else
++static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
++#endif
++
+ static enum drm_connector_status
+ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+ {
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
++ bool connected = false;
+
+ if (vc4_hdmi->hpd_gpio) {
+ if (gpio_get_value_cansleep(vc4_hdmi->hpd_gpio) ^
+ vc4_hdmi->hpd_active_low)
+- return connector_status_connected;
+- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+- return connector_status_disconnected;
++ connected = true;
++ } else if (drm_probe_ddc(vc4_hdmi->ddc)) {
++ connected = true;
++ } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) {
++ connected = true;
+ }
+
+- if (drm_probe_ddc(vc4_hdmi->ddc))
+- return connector_status_connected;
++ if (connected) {
++ if (connector->status != connector_status_connected) {
++ struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
++
++ if (edid) {
++ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
++ vc4_hdmi->encoder.hdmi_monitor = drm_detect_hdmi_monitor(edid);
++ kfree(edid);
++ }
++ }
+
+- if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
+ return connector_status_connected;
++ }
++
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ return connector_status_disconnected;
+ }
+@@ -639,6 +672,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+ return;
+ }
+
++ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
++
+ /*
+ * FIXME: When the pixel freq is 594MHz (4k60), this needs to be setup
+ * at 300MHz.
+@@ -660,9 +695,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
+ return;
+ }
+
+- if (vc4_hdmi->variant->reset)
+- vc4_hdmi->variant->reset(vc4_hdmi);
+-
+ if (vc4_hdmi->variant->phy_init)
+ vc4_hdmi->variant->phy_init(vc4_hdmi, mode);
+
+@@ -790,6 +822,9 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ pixel_rate = mode->clock * 1000;
+ }
+
++ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
++ pixel_rate = pixel_rate * 2;
++
+ if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+ return -EINVAL;
+
+@@ -1312,13 +1347,20 @@ static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+
+ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1)
+ {
++ struct drm_device *dev = vc4_hdmi->connector.dev;
+ struct cec_msg *msg = &vc4_hdmi->cec_rx_msg;
+ unsigned int i;
+
+ msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
+ VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
++
++ if (msg->len > 16) {
++ drm_err(dev, "Attempting to read too much data (%d)\n", msg->len);
++ return;
++ }
++
+ for (i = 0; i < msg->len; i += 4) {
+- u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + i);
++ u32 val = HDMI_READ(HDMI_CEC_RX_DATA_1 + (i >> 2));
+
+ msg->msg[i] = val & 0xff;
+ msg->msg[i + 1] = (val >> 8) & 0xff;
+@@ -1411,11 +1453,17 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+ {
+ struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
++ struct drm_device *dev = vc4_hdmi->connector.dev;
+ u32 val;
+ unsigned int i;
+
++ if (msg->len > 16) {
++ drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
++ return -ENOMEM;
++ }
++
+ for (i = 0; i < msg->len; i += 4)
+- HDMI_WRITE(HDMI_CEC_TX_DATA_1 + i,
++ HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2),
+ (msg->msg[i]) |
+ (msg->msg[i + 1] << 8) |
+ (msg->msg[i + 2] << 16) |
+@@ -1460,16 +1508,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+ cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
+
+ HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff);
++
+ value = HDMI_READ(HDMI_CEC_CNTRL_1);
+- value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+- /*
+- * Set the logical address to Unregistered and set the clock
+- * divider: the hsm_clock rate and this divider setting will
+- * give a 40 kHz CEC clock.
+- */
+- value |= VC4_HDMI_CEC_ADDR_MASK |
+- (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
++ /* Set the logical address to Unregistered */
++ value |= VC4_HDMI_CEC_ADDR_MASK;
+ HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
++
++ vc4_hdmi_cec_update_clk_div(vc4_hdmi);
++
+ ret = devm_request_threaded_irq(&pdev->dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+@@ -1740,6 +1786,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
+ vc4_hdmi->disable_wifi_frequencies =
+ of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
++ if (vc4_hdmi->variant->reset)
++ vc4_hdmi->variant->reset(vc4_hdmi);
++
+ pm_runtime_enable(dev);
+
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
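Two of the CEC fixes above deserve spelling out. First, the divider: the old code hard-wired 4091, which only yields the required 40 kHz CEC tick for one particular HSM clock rate, while vc4_hdmi_cec_update_clk_div() derives it from the actual rate. Second, the RX/TX data FIFOs are exposed as consecutive 32-bit registers indexed by register number, so a byte offset i must become a word index (i >> 2); the old code stepped the register enum by 4 per word and read the wrong registers. A standalone check of the divider arithmetic, with a hypothetical HSM rate:

#include <stdio.h>

int main(void)
{
        unsigned long hsm_rate = 163682864;  /* hypothetical clk_get_rate() */
        unsigned int cec_freq = 40000;       /* CEC_CLOCK_FREQ in the driver */
        unsigned short clk_cnt = hsm_rate / cec_freq;

        /* 163682864 / 40000 = 4092: close to the old constant 4091, but
         * correct for this rate -- and for whatever rate the clock tree
         * actually provides. */
        printf("DIV_CLK_CNT = %u\n", clk_cnt);
        return 0;
}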
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+index 96d764ebfe675..5379c36f09923 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
++++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+@@ -29,6 +29,7 @@ enum vc4_hdmi_field {
+ HDMI_CEC_CPU_MASK_SET,
+ HDMI_CEC_CPU_MASK_STATUS,
+ HDMI_CEC_CPU_STATUS,
++ HDMI_CEC_CPU_SET,
+
+ /*
+ * Transmit data, first byte is low byte of the 32-bit reg.
+@@ -196,9 +197,10 @@ static const struct vc4_hdmi_register __maybe_unused vc4_hdmi_fields[] = {
+ VC4_HDMI_REG(HDMI_TX_PHY_RESET_CTL, 0x02c0),
+ VC4_HDMI_REG(HDMI_TX_PHY_CTL_0, 0x02c4),
+ VC4_HDMI_REG(HDMI_CEC_CPU_STATUS, 0x0340),
++ VC4_HDMI_REG(HDMI_CEC_CPU_SET, 0x0344),
+ VC4_HDMI_REG(HDMI_CEC_CPU_CLEAR, 0x0348),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_STATUS, 0x034c),
+- VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x034c),
++ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_SET, 0x0350),
+ VC4_HDMI_REG(HDMI_CEC_CPU_MASK_CLEAR, 0x0354),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_START, 0x0400),
+ };
+diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
+index c30c75ee83fce..8502400b2f9c9 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
++++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
+@@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file,
+ int ret;
+ u32 handle;
+
+- if (vgdev->has_virgl_3d)
+- virtio_gpu_create_context(dev, file);
+-
+ ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ if (ret < 0)
+ return ret;
+@@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+ if (!vgdev->has_virgl_3d)
+ goto out_notify;
+
++ /* the context might still be missing when the first ioctl is
++ * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
++ */
++ virtio_gpu_create_context(obj->dev, file);
++
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
+index b4ec479c32cda..b375394193be8 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
++++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
+@@ -163,6 +163,7 @@ int virtio_gpu_init(struct drm_device *dev)
+ vgdev->host_visible_region.len,
+ dev_name(&vgdev->vdev->dev))) {
+ DRM_ERROR("Could not reserve host visible region\n");
++ ret = -EBUSY;
+ goto err_vqs;
+ }
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 8a8b2b982f83c..097cb1ee31268 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1307,6 +1307,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
+
+ static s32 snto32(__u32 value, unsigned n)
+ {
++ if (!value || !n)
++ return 0;
++
+ switch (n) {
+ case 8: return ((__s8)value);
+ case 16: return ((__s16)value);
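snto32() sign-extends an n-bit HID report field into an s32. Widths without a dedicated case fall through to a generic shift-based extension, and that path shifts by n - 1, which is undefined for n == 0; the new guard makes the degenerate inputs return 0 instead. A standalone demo of the same logic:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's snto32(): fixed-width casts for the
 * common cases, generic sign extension otherwise. */
static int32_t snto32(uint32_t value, unsigned n)
{
        if (!value || !n)       /* the new guard: n == 0 would shift by -1 */
                return 0;

        switch (n) {
        case 8:  return (int8_t)value;
        case 16: return (int16_t)value;
        case 32: return (int32_t)value;
        }
        return value & (1U << (n - 1)) ? value | (~0U << n) : value;
}

int main(void)
{
        printf("%d\n", snto32(0xFF, 8));  /* -1: sign bit of an 8-bit field */
        printf("%d\n", snto32(0x7F, 8));  /* 127 */
        printf("%d\n", snto32(0x40, 7));  /* -64: 7-bit field, bit 6 set */
        return 0;
}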
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 45e7e0bdd382b..fcdc922bc9733 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -980,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ case 0x07:
+ device_type = "eQUAD step 4 Gaming";
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
++ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ case 0x08:
+ device_type = "eQUAD step 4 for gamepads";
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 1bd0eb71559ca..44d715c12f6ab 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2600,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ wacom_wac->is_invalid_bt_frame = !value;
+ return;
+ case HID_DG_CONTACTMAX:
+- features->touch_max = value;
++ if (!features->touch_max) {
++ features->touch_max = value;
++ } else {
++ hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
++ "%d -> %d\n", __func__, features->touch_max, value);
++ }
+ return;
+ }
+
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
+index 7596dc1646484..44a3f5660c109 100644
+--- a/drivers/hsi/controllers/omap_ssi_core.c
++++ b/drivers/hsi/controllers/omap_ssi_core.c
+@@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int err;
+
+- err = pm_runtime_get_sync(ssi->device.parent);
++ err = pm_runtime_resume_and_get(ssi->device.parent);
+ if (err < 0) {
+ dev_err(&ssi->device, "runtime PM failed %d\n", err);
+ return err;
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 1d44bb635bb84..6be9f56cb6270 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -1102,8 +1102,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ vmbus_device_unregister(channel->device_obj);
+ put_device(dev);
+ }
+- }
+- if (channel->primary_channel != NULL) {
++ } else if (channel->primary_channel != NULL) {
+ /*
+ * Sub-channel is being rescinded. Following is the channel
+ * close sequence when initiated from the driver (refer to
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index b20b6ff17cf65..578d4628d9183 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -226,7 +226,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+- writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
++ if (drvdata->stallctl)
++ writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
+ writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
+ writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
+@@ -1288,7 +1289,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
+ state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
+ state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+- state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
++ if (drvdata->stallctl)
++ state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
+ state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
+ state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
+ state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
+@@ -1355,7 +1357,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+
+ state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+
+- state->trcpdcr = readl(drvdata->base + TRCPDCR);
++ if (!drvdata->skip_power_up)
++ state->trcpdcr = readl(drvdata->base + TRCPDCR);
+
+ /* wait for TRCSTATR.IDLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+@@ -1373,9 +1376,9 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ * potentially save power on systems that respect the TRCPDCR_PU
+ * despite requesting software to save/restore state.
+ */
+- writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
+- drvdata->base + TRCPDCR);
+-
++ if (!drvdata->skip_power_up)
++ writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
++ drvdata->base + TRCPDCR);
+ out:
+ CS_LOCK(drvdata->base);
+ return ret;
+@@ -1397,7 +1400,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+- writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
++ if (drvdata->stallctl)
++ writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
+ writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
+ writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
+@@ -1469,7 +1473,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+- writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
++ if (!drvdata->skip_power_up)
++ writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+
+ drvdata->state_needs_restore = false;
+
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+index 989ce7b8ade7c..4682f26139961 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+@@ -389,7 +389,7 @@ static ssize_t mode_store(struct device *dev,
+ config->eventctrl1 &= ~BIT(12);
+
+ /* bit[8], Instruction stall bit */
+- if (config->mode & ETM_MODE_ISTALL_EN)
++ if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
+ config->stall_ctrl |= BIT(8);
+ else
+ config->stall_ctrl &= ~BIT(8);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index d8295b1c379d1..35baca2f62c4e 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -159,6 +159,11 @@
+
+ #define IE_S_ALL_INTERRUPT_SHIFT 21
+ #define IE_S_ALL_INTERRUPT_MASK 0x3f
++/*
++ * It takes ~18us to read 10 bytes of data, so to keep the tasklet's
++ * runtime short, the maximum slave read per tasklet run is 10 bytes.
++ */
++#define MAX_SLAVE_RX_PER_INT 10
+
+ enum i2c_slave_read_status {
+ I2C_SLAVE_RX_FIFO_EMPTY = 0,
+@@ -205,8 +210,18 @@ struct bcm_iproc_i2c_dev {
+ /* bytes that have been read */
+ unsigned int rx_bytes;
+ unsigned int thld_bytes;
++
++ bool slave_rx_only;
++ bool rx_start_rcvd;
++ bool slave_read_complete;
++ u32 tx_underrun;
++ u32 slave_int_mask;
++ struct tasklet_struct slave_rx_tasklet;
+ };
+
++/* tasklet to process slave rx data */
++static void slave_rx_tasklet_fn(unsigned long);
++
+ /*
+ * Can be expanded in the future if more interrupt status bits are utilized
+ */
+@@ -215,7 +230,8 @@ struct bcm_iproc_i2c_dev {
+
+ #define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
+ | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\
+- | BIT(IS_S_TX_UNDERRUN_SHIFT))
++ | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
++ | BIT(IS_S_RX_THLD_SHIFT))
+
+ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
+ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
+@@ -259,6 +275,7 @@ static void bcm_iproc_i2c_slave_init(
+ {
+ u32 val;
+
++ iproc_i2c->tx_underrun = 0;
+ if (need_reset) {
+ /* put controller in reset */
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+@@ -295,8 +312,11 @@ static void bcm_iproc_i2c_slave_init(
+
+ /* Enable interrupt register to indicate a valid byte in receive fifo */
+ val = BIT(IE_S_RX_EVENT_SHIFT);
++ /* Enable interrupt register to indicate a Master read transaction */
++ val |= BIT(IE_S_RD_EVENT_SHIFT);
+ /* Enable interrupt register for the Slave BUSY command */
+ val |= BIT(IE_S_START_BUSY_SHIFT);
++ iproc_i2c->slave_int_mask = val;
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+
+@@ -321,76 +341,176 @@ static void bcm_iproc_i2c_check_slave_status(
+ }
+ }
+
+-static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+- u32 status)
++static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+ {
++ u8 rx_data, rx_status;
++ u32 rx_bytes = 0;
+ u32 val;
+- u8 value, rx_status;
+
+- /* Slave RX byte receive */
+- if (status & BIT(IS_S_RX_EVENT_SHIFT)) {
++ while (rx_bytes < MAX_SLAVE_RX_PER_INT) {
+ val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
++ rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++
+ if (rx_status == I2C_SLAVE_RX_START) {
+- /* Start of SMBUS for Master write */
++ /* Start of SMBUS Master write */
+ i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_WRITE_REQUESTED, &value);
+-
+- val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++ I2C_SLAVE_WRITE_REQUESTED, &rx_data);
++ iproc_i2c->rx_start_rcvd = true;
++ iproc_i2c->slave_read_complete = false;
++ } else if (rx_status == I2C_SLAVE_RX_DATA &&
++ iproc_i2c->rx_start_rcvd) {
++ /* Middle of SMBUS Master write */
+ i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_WRITE_RECEIVED, &value);
+- } else if (status & BIT(IS_S_RD_EVENT_SHIFT)) {
+- /* Start of SMBUS for Master Read */
+- i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_READ_REQUESTED, &value);
+- iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
++ I2C_SLAVE_WRITE_RECEIVED, &rx_data);
++ } else if (rx_status == I2C_SLAVE_RX_END &&
++ iproc_i2c->rx_start_rcvd) {
++ /* End of SMBUS Master write */
++ if (iproc_i2c->slave_rx_only)
++ i2c_slave_event(iproc_i2c->slave,
++ I2C_SLAVE_WRITE_RECEIVED,
++ &rx_data);
++
++ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP,
++ &rx_data);
++ } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) {
++ iproc_i2c->rx_start_rcvd = false;
++ iproc_i2c->slave_read_complete = true;
++ break;
++ }
+
+- val = BIT(S_CMD_START_BUSY_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++ rx_bytes++;
++ }
++}
+
+- /*
+- * Enable interrupt for TX FIFO becomes empty and
+- * less than PKT_LENGTH bytes were output on the SMBUS
+- */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val |= BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+- } else {
+- /* Master write other than start */
+- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++static void slave_rx_tasklet_fn(unsigned long data)
++{
++ struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data;
++ u32 int_clr;
++
++ bcm_iproc_i2c_slave_read(iproc_i2c);
++
++ /* clear pending IS_S_RX_EVENT_SHIFT interrupt */
++ int_clr = BIT(IS_S_RX_EVENT_SHIFT);
++
++ if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) {
++ /*
++ * In the case of a single-byte master-read request, the
++ * IS_S_TX_UNDERRUN_SHIFT event is generated before the
++ * IS_S_START_BUSY_SHIFT event, so slave data transmission must
++ * start from the first IS_S_TX_UNDERRUN_SHIFT event.
++ *
++ * This means the slave must not send any data when the
++ * IS_S_RD_EVENT_SHIFT event is generated; otherwise it would advance
++ * the EEPROM's (or other backing slave driver's) read pointer twice.
++ */
++ iproc_i2c->tx_underrun = 0;
++ iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT);
++
++ /* clear IS_S_RD_EVENT_SHIFT interrupt */
++ int_clr |= BIT(IS_S_RD_EVENT_SHIFT);
++ }
++
++ /* clear slave interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr);
++ /* enable slave interrupts */
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask);
++}
++
++static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
++ u32 status)
++{
++ u32 val;
++ u8 value;
++
++ /*
++ * Slave events in case of master-write, master-write-read, and
++ * master-read
++ *
++ * Master-write : only IS_S_RX_EVENT_SHIFT event
++ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events
++ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events or only IS_S_RD_EVENT_SHIFT
++ */
++ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++ status & BIT(IS_S_RD_EVENT_SHIFT)) {
++ /* disable slave interrupts */
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~iproc_i2c->slave_int_mask;
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++ if (status & BIT(IS_S_RD_EVENT_SHIFT))
++ /* Master-write-read request */
++ iproc_i2c->slave_rx_only = false;
++ else
++ /* Master-write request only */
++ iproc_i2c->slave_rx_only = true;
++
++ /* schedule tasklet to read data later */
++ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++ /* clear only IS_S_RX_EVENT_SHIFT interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++ BIT(IS_S_RX_EVENT_SHIFT));
++ }
++
++ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
++ iproc_i2c->tx_underrun++;
++ if (iproc_i2c->tx_underrun == 1)
++ /* Start of SMBUS for Master Read */
+ i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_WRITE_RECEIVED, &value);
+- if (rx_status == I2C_SLAVE_RX_END)
+- i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_STOP, &value);
+- }
+- } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+- /* Master read other than start */
+- i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_READ_PROCESSED, &value);
++ I2C_SLAVE_READ_REQUESTED,
++ &value);
++ else
++ /* Master read other than start */
++ i2c_slave_event(iproc_i2c->slave,
++ I2C_SLAVE_READ_PROCESSED,
++ &value);
+
+ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
++ /* start transfer */
+ val = BIT(S_CMD_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++
++ /* clear interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++ BIT(IS_S_TX_UNDERRUN_SHIFT));
+ }
+
+- /* Stop */
++ /* Stop received from master in case of master read transaction */
+ if (status & BIT(IS_S_START_BUSY_SHIFT)) {
+- i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+ /*
+ * Enable interrupt for TX FIFO becomes empty and
+ * less than PKT_LENGTH bytes were output on the SMBUS
+ */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
++ iproc_i2c->slave_int_mask);
++
++ /* End of SMBUS for Master Read */
++ val = BIT(S_TX_WR_STATUS_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val);
++
++ val = BIT(S_CMD_START_BUSY_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++
++ /* flush TX FIFOs */
++ val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET);
++ val |= (BIT(S_FIFO_TX_FLUSH_SHIFT));
++ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
++
++ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
++
++ /* clear interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++ BIT(IS_S_START_BUSY_SHIFT));
+ }
+
+- /* clear interrupt status */
+- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
++ /* check slave transmit status only if slave is transmitting */
++ if (!iproc_i2c->slave_rx_only)
++ bcm_iproc_i2c_check_slave_status(iproc_i2c);
+
+- bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ return true;
+ }
+
+@@ -505,12 +625,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+ {
+ struct bcm_iproc_i2c_dev *iproc_i2c = data;
+- u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
++ u32 slave_status;
++ u32 status;
+ bool ret;
+- u32 sl_status = status & ISR_MASK_SLAVE;
+
+- if (sl_status) {
+- ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
++ status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
++ /* process only slave interrupt which are enabled */
++ slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) &
++ ISR_MASK_SLAVE;
++
++ if (slave_status) {
++ ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status);
+ if (ret)
+ return IRQ_HANDLED;
+ else
+@@ -1066,6 +1191,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
++
++ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
++ (unsigned long)iproc_i2c);
++
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+ return 0;
+ }
+@@ -1086,6 +1215,8 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
++ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
++
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
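The restructuring above is a classic top/bottom-half split: the hard IRQ handler only masks the slave interrupts and schedules a tasklet, and the tasklet drains the RX FIFO in bounded chunks (MAX_SLAVE_RX_PER_INT) before re-enabling the sources it handled. The shape, with the bookkeeping elided and helper names invented for clarity:

/* Sketch only: disable_slave_irqs(), drain_rx_fifo() and
 * ack_and_reenable() stand in for the register accesses in
 * bcm_iproc_i2c_slave_isr() and slave_rx_tasklet_fn(). */
static irqreturn_t slave_isr(int irq, void *data)
{
        struct bcm_iproc_i2c_dev *d = data;

        disable_slave_irqs(d);                   /* quiet the source...      */
        tasklet_schedule(&d->slave_rx_tasklet);  /* ...and defer the work    */
        return IRQ_HANDLED;
}

static void slave_rx_tasklet_fn(unsigned long data)
{
        struct bcm_iproc_i2c_dev *d = (struct bcm_iproc_i2c_dev *)data;

        drain_rx_fifo(d, MAX_SLAVE_RX_PER_INT);  /* bounded work per run */
        ack_and_reenable(d);                     /* clear + unmask       */
}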
+diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
+index d4e0a0f6732ae..ba766d24219ef 100644
+--- a/drivers/i2c/busses/i2c-brcmstb.c
++++ b/drivers/i2c/busses/i2c-brcmstb.c
+@@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
+ goto cmd_out;
+ }
+
+- if ((CMD_RD || CMD_WR) &&
++ if ((cmd == CMD_RD || cmd == CMD_WR) &&
+ bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
+ rc = -EREMOTEIO;
+ dev_dbg(dev->device, "controller received NOACK intr for %s\n",
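The condition being fixed here is worth a second look: CMD_RD and CMD_WR are enum constants, not tests of cmd, so (CMD_RD || CMD_WR) folds to a compile-time 1 (at least one of them is non-zero) and the NOACK check fired for every command type. A two-line illustration (enum values invented):

enum { CMD_RD = 1, CMD_WR = 2 };  /* illustrative values only */

int old_test(int cmd) { return CMD_RD || CMD_WR; }              /* always 1 */
int new_test(int cmd) { return cmd == CMD_RD || cmd == CMD_WR; }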
+diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
+index 20a9881a0d6cd..5ac30d95650cc 100644
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -606,6 +606,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
+ u32 i2c_ctl;
+ u32 int_en = 0;
+ u32 i2c_auto_conf = 0;
++ u32 i2c_addr = 0;
+ u32 fifo_ctl;
+ unsigned long flags;
+ unsigned short trig_lvl;
+@@ -640,7 +641,12 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
+ int_en |= HSI2C_INT_TX_ALMOSTEMPTY_EN;
+ }
+
+- writel(HSI2C_SLV_ADDR_MAS(i2c->msg->addr), i2c->regs + HSI2C_ADDR);
++ i2c_addr = HSI2C_SLV_ADDR_MAS(i2c->msg->addr);
++
++ if (i2c->op_clock >= I2C_MAX_FAST_MODE_PLUS_FREQ)
++ i2c_addr |= HSI2C_MASTER_ID(MASTER_ID(i2c->adap.nr));
++
++ writel(i2c_addr, i2c->regs + HSI2C_ADDR);
+
+ writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
+ writel(i2c_ctl, i2c->regs + HSI2C_CTL);
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 046d241183c58..214b4c913a139 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -86,6 +86,9 @@ struct geni_i2c_dev {
+ u32 clk_freq_out;
+ const struct geni_i2c_clk_fld *clk_fld;
+ int suspended;
++ void *dma_buf;
++ size_t xfer_len;
++ dma_addr_t dma_addr;
+ };
+
+ struct geni_i2c_err_log {
+@@ -348,14 +351,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
+ dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
+ }
+
++static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
++ struct i2c_msg *cur)
++{
++ gi2c->cur_rd = 0;
++ if (gi2c->dma_buf) {
++ if (gi2c->err)
++ geni_i2c_rx_fsm_rst(gi2c);
++ geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
++ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
++ }
++}
++
++static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
++ struct i2c_msg *cur)
++{
++ gi2c->cur_wr = 0;
++ if (gi2c->dma_buf) {
++ if (gi2c->err)
++ geni_i2c_tx_fsm_rst(gi2c);
++ geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
++ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
++ }
++}
++
+ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+ {
+- dma_addr_t rx_dma;
++ dma_addr_t rx_dma = 0;
+ unsigned long time_left;
+ void *dma_buf;
+ struct geni_se *se = &gi2c->se;
+ size_t len = msg->len;
++ struct i2c_msg *cur;
+
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+@@ -370,19 +398,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ dma_buf = NULL;
++ } else {
++ gi2c->xfer_len = len;
++ gi2c->dma_addr = rx_dma;
++ gi2c->dma_buf = dma_buf;
+ }
+
++ cur = gi2c->cur;
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+- gi2c->cur_rd = 0;
+- if (dma_buf) {
+- if (gi2c->err)
+- geni_i2c_rx_fsm_rst(gi2c);
+- geni_se_rx_dma_unprep(se, rx_dma, len);
+- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
+- }
++ geni_i2c_rx_msg_cleanup(gi2c, cur);
+
+ return gi2c->err;
+ }
+@@ -390,11 +417,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+ {
+- dma_addr_t tx_dma;
++ dma_addr_t tx_dma = 0;
+ unsigned long time_left;
+ void *dma_buf;
+ struct geni_se *se = &gi2c->se;
+ size_t len = msg->len;
++ struct i2c_msg *cur;
+
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+@@ -409,22 +437,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ dma_buf = NULL;
++ } else {
++ gi2c->xfer_len = len;
++ gi2c->dma_addr = tx_dma;
++ gi2c->dma_buf = dma_buf;
+ }
+
+ if (!dma_buf) /* Get FIFO IRQ */
+ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
+
++ cur = gi2c->cur;
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+- gi2c->cur_wr = 0;
+- if (dma_buf) {
+- if (gi2c->err)
+- geni_i2c_tx_fsm_rst(gi2c);
+- geni_se_tx_dma_unprep(se, tx_dma, len);
+- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
+- }
++ geni_i2c_tx_msg_cleanup(gi2c, cur);
+
+ return gi2c->err;
+ }
+diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
+index e68f15f4b4d0c..afff0e2320f74 100644
+--- a/drivers/i3c/master/Kconfig
++++ b/drivers/i3c/master/Kconfig
+@@ -25,6 +25,7 @@ config DW_I3C_MASTER
+ config MIPI_I3C_HCI
+ tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
+ depends on I3C
++ depends on HAS_IOMEM
+ help
+ Support for hardware following the MIPI Alliance's I3C Host Controller
+ Interface specification.
+diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
+index 77af4c1a3f38c..bb86d84558d9a 100644
+--- a/drivers/ide/falconide.c
++++ b/drivers/ide/falconide.c
+@@ -164,6 +164,7 @@ static int __init falconide_init(struct platform_device *pdev)
+ if (rc)
+ goto err_free;
+
++ platform_set_drvdata(pdev, host);
+ return 0;
+ err_free:
+ ide_host_free(host);
+@@ -174,7 +175,7 @@ err:
+
+ static int falconide_remove(struct platform_device *pdev)
+ {
+- struct ide_host *host = dev_get_drvdata(&pdev->dev);
++ struct ide_host *host = platform_get_drvdata(pdev);
+
+ ide_host_remove(host);
+
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 98165589c8ab6..be996dba040cc 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -4333,7 +4333,7 @@ static int cm_add_one(struct ib_device *ib_device)
+ unsigned long flags;
+ int ret;
+ int count = 0;
+- u8 i;
++ unsigned int i;
+
+ cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+ GFP_KERNEL);
+@@ -4345,7 +4345,7 @@ static int cm_add_one(struct ib_device *ib_device)
+ cm_dev->going_down = 0;
+
+ set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
+- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
++ rdma_for_each_port (ib_device, i) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
+@@ -4431,7 +4431,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ .clr_port_cap_mask = IB_PORT_CM_SUP
+ };
+ unsigned long flags;
+- int i;
++ unsigned int i;
+
+ write_lock_irqsave(&cm.device_lock, flags);
+ list_del(&cm_dev->list);
+@@ -4441,7 +4441,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ cm_dev->going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
+- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
++ rdma_for_each_port (ib_device, i) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index c51b84b2d2f37..e3638f80e1d52 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -352,7 +352,13 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
+
+ struct cma_multicast {
+ struct rdma_id_private *id_priv;
+- struct ib_sa_multicast *sa_mc;
++ union {
++ struct ib_sa_multicast *sa_mc;
++ struct {
++ struct work_struct work;
++ struct rdma_cm_event event;
++ } iboe_join;
++ };
+ struct list_head list;
+ void *context;
+ struct sockaddr_storage addr;
+@@ -1823,6 +1829,8 @@ static void destroy_mc(struct rdma_id_private *id_priv,
+ cma_igmp_send(ndev, &mgid, false);
+ dev_put(ndev);
+ }
++
++ cancel_work_sync(&mc->iboe_join.work);
+ }
+ kfree(mc);
+ }
+@@ -2683,6 +2691,28 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv,
+ return (id_priv->query_id < 0) ? id_priv->query_id : 0;
+ }
+
++static void cma_iboe_join_work_handler(struct work_struct *work)
++{
++ struct cma_multicast *mc =
++ container_of(work, struct cma_multicast, iboe_join.work);
++ struct rdma_cm_event *event = &mc->iboe_join.event;
++ struct rdma_id_private *id_priv = mc->id_priv;
++ int ret;
++
++ mutex_lock(&id_priv->handler_mutex);
++ if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
++ READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
++ goto out_unlock;
++
++ ret = cma_cm_event_handler(id_priv, event);
++ WARN_ON(ret);
++
++out_unlock:
++ mutex_unlock(&id_priv->handler_mutex);
++ if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
++ rdma_destroy_ah_attr(&event->param.ud.ah_attr);
++}
++
+ static void cma_work_handler(struct work_struct *_work)
+ {
+ struct cma_work *work = container_of(_work, struct cma_work, work);
+@@ -4478,10 +4508,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+ cma_make_mc_event(status, id_priv, multicast, &event, mc);
+ ret = cma_cm_event_handler(id_priv, &event);
+ rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+- if (ret) {
+- destroy_id_handler_unlock(id_priv);
+- return 0;
+- }
++ WARN_ON(ret);
+
+ out:
+ mutex_unlock(&id_priv->handler_mutex);
+@@ -4604,7 +4631,6 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+ {
+- struct cma_work *work;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int err = 0;
+ struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+@@ -4618,10 +4644,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ if (cma_zero_addr(addr))
+ return -EINVAL;
+
+- work = kzalloc(sizeof *work, GFP_KERNEL);
+- if (!work)
+- return -ENOMEM;
+-
+ gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
+ rdma_start_port(id_priv->cma_dev->device)];
+ cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
+@@ -4632,10 +4654,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+
+ if (dev_addr->bound_dev_if)
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+- if (!ndev) {
+- err = -ENODEV;
+- goto err_free;
+- }
++ if (!ndev)
++ return -ENODEV;
++
+ ib.rec.rate = iboe_get_rate(ndev);
+ ib.rec.hop_limit = 1;
+ ib.rec.mtu = iboe_get_mtu(ndev->mtu);
+@@ -4653,24 +4674,15 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ err = -ENOTSUPP;
+ }
+ dev_put(ndev);
+- if (err || !ib.rec.mtu) {
+- if (!err)
+- err = -EINVAL;
+- goto err_free;
+- }
++ if (err || !ib.rec.mtu)
++ return err ?: -EINVAL;
++
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &ib.rec.port_gid);
+- work->id = id_priv;
+- INIT_WORK(&work->work, cma_work_handler);
+- cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
+- /* Balances with cma_id_put() in cma_work_handler */
+- cma_id_get(id_priv);
+- queue_work(cma_wq, &work->work);
++ INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
++ cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
++ queue_work(cma_wq, &mc->iboe_join.work);
+ return 0;
+-
+-err_free:
+- kfree(work);
+- return err;
+ }
+
+ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 19104a6756915..dd7f3b437c6be 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+
+ mutex_lock(&file->mutex);
+
++ if (file->agents_dead) {
++ mutex_unlock(&file->mutex);
++ return -EIO;
++ }
++
+ while (list_empty(&file->recv_list)) {
+ mutex_unlock(&file->mutex);
+
+@@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ mutex_lock(&file->mutex);
+ }
+
++ if (file->agents_dead) {
++ mutex_unlock(&file->mutex);
++ return -EIO;
++ }
++
+ packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ list_del(&packet->list);
+
+@@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+
+ agent = __get_agent(file, packet->mad.hdr.id);
+ if (!agent) {
+- ret = -EINVAL;
++ ret = -EIO;
+ goto err_up;
+ }
+
+@@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+ /* we will always be able to post a MAD send */
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM;
+
++ mutex_lock(&file->mutex);
+ poll_wait(filp, &file->recv_wait, wait);
+
+ if (!list_empty(&file->recv_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
++ if (file->agents_dead)
++ mask = EPOLLERR;
++ mutex_unlock(&file->mutex);
+
+ return mask;
+ }
+@@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+ list_for_each_entry(file, &port->file_list, port_list) {
+ mutex_lock(&file->mutex);
+ file->agents_dead = 1;
++ wake_up_interruptible(&file->recv_wait);
+ mutex_unlock(&file->mutex);
+
+ for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index ad8253245a85f..54abe615b502a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -54,6 +54,7 @@
+ /* Hardware specification only for v1 engine */
+ #define HNS_ROCE_MIN_CQE_NUM 0x40
+ #define HNS_ROCE_MIN_WQE_NUM 0x20
++#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
+
+ /* Hardware specification only for v1 engine */
+ #define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
+@@ -65,6 +66,8 @@
+ #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
+ #define HNS_ROCE_MIN_CQE_CNT 16
+
++#define HNS_ROCE_RESERVED_SGE 1
++
+ #define HNS_ROCE_MAX_IRQ_NUM 128
+
+ #define HNS_ROCE_SGE_IN_WQE 2
+@@ -393,6 +396,7 @@ struct hns_roce_wq {
+ spinlock_t lock;
+ u32 wqe_cnt; /* WQE num */
+ u32 max_gs;
++ u32 rsv_sge;
+ int offset;
+ int wqe_shift; /* WQE size */
+ u32 head;
+@@ -489,6 +493,8 @@ struct hns_roce_idx_que {
+ struct hns_roce_mtr mtr;
+ int entry_shift;
+ unsigned long *bitmap;
++ u32 head;
++ u32 tail;
+ };
+
+ struct hns_roce_srq {
+@@ -496,6 +502,7 @@ struct hns_roce_srq {
+ unsigned long srqn;
+ u32 wqe_cnt;
+ int max_gs;
++ u32 rsv_sge;
+ int wqe_shift;
+ void __iomem *db_reg_l;
+
+@@ -507,8 +514,6 @@ struct hns_roce_srq {
+ u64 *wrid;
+ struct hns_roce_idx_que idx_que;
+ spinlock_t lock;
+- u16 head;
+- u16 tail;
+ struct mutex mutex;
+ void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
+ };
+@@ -647,7 +652,7 @@ struct hns_roce_qp {
+ struct hns_roce_db sdb;
+ unsigned long en_flags;
+ u32 doorbell_qpn;
+- u32 sq_signal_bits;
++ enum ib_sig_type sq_signal_bits;
+ struct hns_roce_wq sq;
+
+ struct hns_roce_mtr mtr;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index edc9d6b98d954..cfd2e1b60c7f0 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -1075,9 +1075,8 @@ static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
+ return NULL;
+
+ if (exist_bt) {
+- hem->addr = dma_alloc_coherent(hr_dev->dev,
+- count * BA_BYTE_LEN,
+- &hem->dma_addr, GFP_KERNEL);
++ hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
++ &hem->dma_addr, GFP_KERNEL);
+ if (!hem->addr) {
+ kfree(hem);
+ return NULL;
+@@ -1336,6 +1335,10 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ if (ba_num < 1)
+ return -ENOMEM;
+
++ if (ba_num > unit)
++ return -ENOBUFS;
++
++ ba_num = min_t(int, ba_num, unit);
+ INIT_LIST_HEAD(&temp_root);
+ offset = r->offset;
+ /* indicate to last region */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index f68585ff8e8a5..c2539a8d91116 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -43,6 +43,22 @@
+ #include "hns_roce_hem.h"
+ #include "hns_roce_hw_v1.h"
+
++/**
++ * hns_get_gid_index - Get gid index.
++ * @hr_dev: pointer to structure hns_roce_dev.
++ * @port: port, value range: 0 ~ MAX
++ * @gid_index: gid_index, value range: 0 ~ MAX
++ * Description:
++ * N ports shared gids, allocation method as follow:
++ * GID[0][0], GID[1][0],.....GID[N - 1][0],
++ * GID[0][0], GID[1][0],.....GID[N - 1][0],
++ * And so on
++ */
++u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
++{
++ return gid_index * hr_dev->caps.num_ports + port;
++}
++
+ static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
+ {
+ dseg->lkey = cpu_to_le32(sg->lkey);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 833e1f259936f..0f76e193317e6 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -741,6 +741,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ unsigned long flags;
+ void *wqe = NULL;
+ u32 wqe_idx;
++ u32 max_sge;
+ int nreq;
+ int ret;
+ int i;
+@@ -754,6 +755,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ goto out;
+ }
+
++ max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq))) {
+@@ -764,9 +766,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
+- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
++ if (unlikely(wr->num_sge > max_sge)) {
+ ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
+- wr->num_sge, hr_qp->rq.max_gs);
++ wr->num_sge, max_sge);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+@@ -781,9 +783,10 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ dseg++;
+ }
+
+- if (wr->num_sge < hr_qp->rq.max_gs) {
++ if (hr_qp->rq.rsv_sge) {
+ dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg->addr = 0;
++ dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ }
+
+ /* rq support inline data */
+@@ -846,11 +849,20 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+ spin_lock(&srq->lock);
+
+ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+- srq->tail++;
++ srq->idx_que.tail++;
+
+ spin_unlock(&srq->lock);
+ }
+
++int hns_roce_srqwq_overflow(struct hns_roce_srq *srq, int nreq)
++{
++ struct hns_roce_idx_que *idx_que = &srq->idx_que;
++ unsigned int cur;
++
++ cur = idx_que->head - idx_que->tail;
++ return cur + nreq >= srq->wqe_cnt;
++}
++
+ static int find_empty_entry(struct hns_roce_idx_que *idx_que,
+ unsigned long size)
+ {
+@@ -879,22 +891,27 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ __le32 *srq_idx;
+ int ret = 0;
+ int wqe_idx;
++ u32 max_sge;
+ void *wqe;
+ int nreq;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+- ind = srq->head & (srq->wqe_cnt - 1);
++ ind = srq->idx_que.head & (srq->wqe_cnt - 1);
++ max_sge = srq->max_gs - srq->rsv_sge;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+- if (unlikely(wr->num_sge >= srq->max_gs)) {
++ if (unlikely(wr->num_sge > max_sge)) {
++ ibdev_err(&hr_dev->ib_dev,
++ "srq: num_sge = %d, max_sge = %u.\n",
++ wr->num_sge, max_sge);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+- if (unlikely(srq->head == srq->tail)) {
++ if (unlikely(hns_roce_srqwq_overflow(srq, nreq))) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+@@ -916,9 +933,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
+ }
+
+- if (wr->num_sge < srq->max_gs) {
+- dseg[i].len = 0;
+- dseg[i].lkey = cpu_to_le32(0x100);
++ if (srq->rsv_sge) {
++ dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
++ dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].addr = 0;
+ }
+
+@@ -930,7 +947,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ }
+
+ if (likely(nreq)) {
+- srq->head += nreq;
++ srq->idx_que.head += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+@@ -942,7 +959,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+ (srq->srqn & V2_DB_BYTE_4_TAG_M));
+ srq_db.parameter =
+- cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
++ cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
+
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ }
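hns_roce_srqwq_overflow() relies on head and tail being free-running u32 counters: unsigned subtraction is modular, so head - tail is the ring occupancy even after either counter wraps, as long as the ring holds fewer than 2^32 entries. A standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

static int srqwq_overflow(uint32_t head, uint32_t tail,
                          unsigned int nreq, uint32_t wqe_cnt)
{
        uint32_t cur = head - tail;   /* modular arithmetic: wrap-safe */

        return cur + nreq >= wqe_cnt;
}

int main(void)
{
        /* head has wrapped past zero, tail has not yet caught up */
        uint32_t head = 3, tail = 0xFFFFFFFDu;

        printf("occupancy = %u\n", head - tail);                    /* 6 */
        printf("overflow? %d\n", srqwq_overflow(head, tail, 3, 8)); /* 1 */
        return 0;
}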
+@@ -1247,7 +1264,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ u32 timeout = 0;
+ int handle = 0;
+ u16 desc_ret;
+- int ret = 0;
++ int ret;
+ int ntc;
+
+ spin_lock_bh(&csq->lock);
+@@ -1292,15 +1309,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ if (hns_roce_cmq_csq_done(hr_dev)) {
+ complete = true;
+ handle = 0;
++ ret = 0;
+ while (handle < num) {
+ /* get the result of hardware write back */
+ desc_to_use = &csq->desc[ntc];
+ desc[handle] = *desc_to_use;
+ dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+ desc_ret = le16_to_cpu(desc[handle].retval);
+- if (desc_ret == CMD_EXEC_SUCCESS)
+- ret = 0;
+- else
++ if (unlikely(desc_ret != CMD_EXEC_SUCCESS))
+ ret = -EIO;
+ priv->cmq.last_status = desc_ret;
+ ntc++;
+@@ -1866,7 +1882,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
+
+ caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+- HNS_ROCE_CAP_FLAG_RQ_INLINE |
+ HNS_ROCE_CAP_FLAG_RECORD_DB |
+ HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+
+@@ -1999,10 +2014,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
+ caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
+ caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
+ caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
++ caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
+ caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
+ caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
+ caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
+ caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
++ caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
+ caps->num_aeq_vectors = resp_a->num_aeq_vectors;
+ caps->num_other_vectors = resp_a->num_other_vectors;
+ caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
+@@ -4235,7 +4252,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+ {
+- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+@@ -4243,7 +4259,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ dma_addr_t irrl_ba;
+ enum ib_mtu mtu;
+ u8 lp_pktn_ini;
+- u8 port_num;
+ u64 *mtts;
+ u8 *dmac;
+ u8 *smac;
+@@ -4324,15 +4339,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+ }
+
+- /* Configure GID index */
+- port_num = rdma_ah_get_port_num(&attr->ah_attr);
+- roce_set_field(context->byte_20_smac_sgid_idx,
+- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
+- hns_get_gid_index(hr_dev, port_num - 1,
+- grh->sgid_index));
+- roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+- V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+-
+ memcpy(&(context->dmac), dmac, sizeof(u32));
+ roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+ V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
+@@ -5083,7 +5089,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
++ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+@@ -5331,7 +5337,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ return -EINVAL;
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+- if (srq_attr->srq_limit >= srq->wqe_cnt)
++ if (srq_attr->srq_limit > srq->wqe_cnt)
+ return -EINVAL;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+@@ -5394,8 +5400,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S);
+
+ attr->srq_limit = limit_wl;
+- attr->max_wr = srq->wqe_cnt - 1;
+- attr->max_sge = srq->max_gs;
++ attr->max_wr = srq->wqe_cnt;
++ attr->max_sge = srq->max_gs - srq->rsv_sge;
+
+ out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index bdaccf86460dd..09d88d97a7ff9 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -96,7 +96,8 @@
+ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+-#define HNS_ROCE_INVALID_LKEY 0x100
++#define HNS_ROCE_INVALID_LKEY 0x0
++#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+ #define HNS_ROCE_CMQ_TX_TIMEOUT 30000
+ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+ #define HNS_ROCE_V2_RSV_QPS 8
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index d9179bae4989d..baadb12b13752 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -42,22 +42,6 @@
+ #include "hns_roce_device.h"
+ #include "hns_roce_hem.h"
+
+-/**
+- * hns_get_gid_index - Get gid index.
+- * @hr_dev: pointer to structure hns_roce_dev.
+- * @port: port, value range: 0 ~ MAX
+- * @gid_index: gid_index, value range: 0 ~ MAX
+- * Description:
+- * N ports shared gids, allocation method as follow:
+- * GID[0][0], GID[1][0],.....GID[N - 1][0],
+- * GID[0][0], GID[1][0],.....GID[N - 1][0],
+- * And so on
+- */
+-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
+-{
+- return gid_index * hr_dev->caps.num_ports + port;
+-}
+-
+ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+ {
+ u8 phy_port;
+@@ -772,8 +756,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+ return 0;
+
+ err_qp_table_free:
+- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+- hns_roce_cleanup_qp_table(hr_dev);
++ hns_roce_cleanup_qp_table(hr_dev);
+
+ err_cq_table_free:
+ hns_roce_cleanup_cq_table(hr_dev);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 1bcffd93ff3e3..1e0465f05b7da 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -631,30 +631,26 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
+ }
+
+ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+- dma_addr_t *pages, struct hns_roce_buf_region *region)
++ struct hns_roce_buf_region *region, dma_addr_t *pages,
++ int max_count)
+ {
++ int count, npage;
++ int offset, end;
+ __le64 *mtts;
+- int offset;
+- int count;
+- int npage;
+ u64 addr;
+- int end;
+ int i;
+
+- /* if hopnum is 0, buffer cannot store BAs, so skip write mtt */
+- if (!region->hopnum)
+- return 0;
+-
+ offset = region->offset;
+ end = offset + region->count;
+ npage = 0;
+- while (offset < end) {
++ while (offset < end && npage < max_count) {
++ count = 0;
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ offset, &count, NULL);
+ if (!mtts)
+ return -ENOBUFS;
+
+- for (i = 0; i < count; i++) {
++ for (i = 0; i < count && npage < max_count; i++) {
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ addr = to_hr_hw_page_addr(pages[npage]);
+ else
+@@ -666,7 +662,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ offset += count;
+ }
+
+- return 0;
++ return npage;
+ }
+
+ static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
+@@ -833,8 +829,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ {
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_region *r;
+- unsigned int i;
+- int err;
++ unsigned int i, mapped_cnt;
++ int ret;
+
+ /*
+ * Only use the first page address as root ba when hopnum is 0, this
+@@ -845,26 +841,42 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ return 0;
+ }
+
+- for (i = 0; i < mtr->hem_cfg.region_count; i++) {
++ for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
++ mapped_cnt < page_cnt; i++) {
+ r = &mtr->hem_cfg.region[i];
++ /* if hopnum is 0, no need to map pages in this region */
++ if (!r->hopnum) {
++ mapped_cnt += r->count;
++ continue;
++ }
++
+ if (r->offset + r->count > page_cnt) {
+- err = -EINVAL;
++ ret = -EINVAL;
+ ibdev_err(ibdev,
+ "failed to check mtr%u end %u + %u, max %u.\n",
+ i, r->offset, r->count, page_cnt);
+- return err;
++ return ret;
+ }
+
+- err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
+- if (err) {
++ ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
++ page_cnt - mapped_cnt);
++ if (ret < 0) {
+ ibdev_err(ibdev,
+ "failed to map mtr%u offset %u, ret = %d.\n",
+- i, r->offset, err);
+- return err;
++ i, r->offset, ret);
++ return ret;
+ }
++ mapped_cnt += ret;
++ ret = 0;
+ }
+
+- return 0;
++ if (mapped_cnt < page_cnt) {
++ ret = -ENOBUFS;
++ ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
++ mapped_cnt, page_cnt);
++ }
++
++ return ret;
+ }
+
+ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
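
The mtr_map_region() rework above changes its contract: instead of returning 0 on success it returns the number of pages it actually wrote, and hns_roce_mtr_map() accumulates those counts in mapped_cnt until page_cnt is satisfied, failing with -ENOBUFS if the regions cannot absorb every page. A minimal standalone sketch of the same consume-and-accumulate pattern (all names are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>

/* One region can absorb up to 'capacity' items; returns how many it
 * actually consumed, or a negative errno on a hard failure. */
static int map_region(int capacity, int remaining)
{
        if (capacity < 0)
                return -EINVAL;
        return remaining < capacity ? remaining : capacity;
}

static int map_all(const int *caps, int nregions, int total)
{
        int mapped = 0;

        for (int i = 0; i < nregions && mapped < total; i++) {
                int ret = map_region(caps[i], total - mapped);

                if (ret < 0)
                        return ret;     /* hard errors propagate unchanged */
                mapped += ret;          /* partial progress accumulates */
        }
        /* Like hns_roce_mtr_map(): every page must land somewhere. */
        return mapped < total ? -ENOBUFS : 0;
}

int main(void)
{
        int caps[] = { 4, 2, 8 };

        printf("%d\n", map_all(caps, 3, 10));   /* 0: fits */
        printf("%d\n", map_all(caps, 3, 20));   /* -ENOBUFS: does not fit */
        return 0;
}
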
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 1116371adf74f..8695c96e66964 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -413,9 +413,32 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+ mutex_unlock(&hr_dev->qp_table.bank_mutex);
+ }
+
++static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
++ bool user)
++{
++ u32 max_sge = dev->caps.max_rq_sg;
++
++ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++ return max_sge;
++
++ /* Reserve SGEs only for HIP08 in kernel; The userspace driver will
++ * calculate number of max_sge with reserved SGEs when allocating wqe
++ * buf, so there is no need to do this again in kernel. But the number
++ * may exceed the capacity of SGEs recorded in the firmware, so the
++ * kernel driver should just adapt the value accordingly.
++ */
++ if (user)
++ max_sge = roundup_pow_of_two(max_sge + 1);
++ else
++ hr_qp->rq.rsv_sge = 1;
++
++ return max_sge;
++}
++
+ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+- struct hns_roce_qp *hr_qp, int has_rq)
++ struct hns_roce_qp *hr_qp, int has_rq, bool user)
+ {
++ u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
+ u32 cnt;
+
+ /* If srq exist, set zero for relative number of rq */
+@@ -431,8 +454,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+
+ /* Check the validity of QP support capacity */
+ if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
+- cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
+- ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
++ cap->max_recv_sge > max_sge) {
++ ibdev_err(&hr_dev->ib_dev,
++ "RQ config error, depth = %u, sge = %u\n",
+ cap->max_recv_wr, cap->max_recv_sge);
+ return -EINVAL;
+ }
+@@ -444,7 +468,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ return -EINVAL;
+ }
+
+- hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
++ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
++ hr_qp->rq.rsv_sge);
+
+ if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+@@ -459,7 +484,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
+
+ cap->max_recv_wr = cnt;
+- cap->max_recv_sge = hr_qp->rq.max_gs;
++ cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+
+ return 0;
+ }
+@@ -919,7 +944,7 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
+
+ ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
+- hns_roce_qp_has_rq(init_attr));
++ hns_roce_qp_has_rq(init_attr), !!udata);
+ if (ret) {
+ ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
+ ret);
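
The proc_rq_sge() helper above captures a HIP08 quirk: the kernel driver reserves one extra receive SGE per WQE, while a userspace consumer has already accounted for its own reservation, so its cap is only rounded up. The bookkeeping has to round-trip: rq.max_gs is sized with the reserved entry included, but the value reported back through cap->max_recv_sge subtracts it again. A compressed sketch of that hidden-reservation arithmetic (hypothetical helper, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int v)
{
        unsigned int p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

struct rq {
        unsigned int max_gs;    /* SGEs allocated, reserved one included */
        unsigned int rsv_sge;   /* 1 in kernel on HIP08, else 0 */
};

/* Size the RQ for 'requested' SGEs; returns what the caller may use. */
static unsigned int size_rq(struct rq *rq, unsigned int requested, bool user)
{
        rq->rsv_sge = user ? 0 : 1;     /* userspace already reserved its own */
        rq->max_gs = roundup_pow_of_two(requested + rq->rsv_sge);
        return rq->max_gs - rq->rsv_sge; /* the reserved SGE stays hidden */
}

int main(void)
{
        struct rq rq;
        unsigned int usable;

        usable = size_rq(&rq, 5, false);
        printf("kernel: usable %u of %u\n", usable, rq.max_gs);
        usable = size_rq(&rq, 5, true);
        printf("user:   usable %u of %u\n", usable, rq.max_gs);
        return 0;
}
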
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index c4ae57e4173a1..51de9305bb4de 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2018 Hisilicon Limited.
+ */
+
++#include <linux/pci.h>
+ #include <rdma/ib_umem.h>
+ #include "hns_roce_device.h"
+ #include "hns_roce_cmd.h"
+@@ -246,6 +247,9 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ }
+ }
+
++ idx_que->head = 0;
++ idx_que->tail = 0;
++
+ return 0;
+ err_idx_mtr:
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
+@@ -264,8 +268,6 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+
+ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+ {
+- srq->head = 0;
+- srq->tail = srq->wqe_cnt - 1;
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (!srq->wrid)
+ return -ENOMEM;
+@@ -279,6 +281,28 @@ static void free_srq_wrid(struct hns_roce_srq *srq)
+ srq->wrid = NULL;
+ }
+
++static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
++ bool user)
++{
++ u32 max_sge = dev->caps.max_srq_sges;
++
++ if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
++ return max_sge;
++
++ /* Reserve SGEs only for HIP08 in kernel; The userspace driver will
++ * calculate number of max_sge with reserved SGEs when allocating wqe
++ * buf, so there is no need to do this again in kernel. But the number
++ * may exceed the capacity of SGEs recorded in the firmware, so the
++ * kernel driver should just adapt the value accordingly.
++ */
++ if (user)
++ max_sge = roundup_pow_of_two(max_sge + 1);
++ else
++ hr_srq->rsv_sge = 1;
++
++ return max_sge;
++}
++
+ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+@@ -288,6 +312,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ struct hns_roce_srq *srq = to_hr_srq(ib_srq);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_srq ucmd = {};
++ u32 max_sge;
+ int ret;
+ u32 cqn;
+
+@@ -295,16 +320,27 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ init_attr->srq_type != IB_SRQT_XRC)
+ return -EOPNOTSUPP;
+
+- /* Check the actual SRQ wqe and SRQ sge num */
+- if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+- init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
++ max_sge = proc_srq_sge(hr_dev, srq, !!udata);
++
++ if (init_attr->attr.max_wr > hr_dev->caps.max_srq_wrs ||
++ init_attr->attr.max_sge > max_sge) {
++ ibdev_err(&hr_dev->ib_dev,
++ "SRQ config error, depth = %u, sge = %d\n",
++ init_attr->attr.max_wr, init_attr->attr.max_sge);
+ return -EINVAL;
++ }
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+
+- srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+- srq->max_gs = init_attr->attr.max_sge;
++ init_attr->attr.max_wr = max_t(u32, init_attr->attr.max_wr,
++ HNS_ROCE_MIN_SRQ_WQE_NUM);
++ srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr);
++ srq->max_gs =
++ roundup_pow_of_two(init_attr->attr.max_sge + srq->rsv_sge);
++ init_attr->attr.max_wr = srq->wqe_cnt;
++ init_attr->attr.max_sge = srq->max_gs;
++ init_attr->attr.srq_limit = 0;
+
+ if (udata) {
+ ret = ib_copy_from_udata(&ucmd, udata,
+@@ -351,6 +387,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+
+ srq->event = hns_roce_ib_srq_event;
+ resp.srqn = srq->srqn;
++ srq->max_gs = init_attr->attr.max_sge;
++ init_attr->attr.max_sge = srq->max_gs - srq->rsv_sge;
+
+ if (udata) {
+ ret = ib_copy_to_udata(udata, &resp,
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 819c142857d65..ff8e17d7f7ca8 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -1064,7 +1064,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
++ *obj_id = MLX5_GET(create_tir_out, out, tirn);
++ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
++ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index aabdc07e47537..3562e69eacb14 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3927,7 +3927,7 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+ mlx5_ib_cleanup_multiport_master(dev);
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ cleanup_srcu_struct(&dev->odp_srcu);
+-
++ mutex_destroy(&dev->cap_mask_mutex);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
+ WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+ }
+@@ -3978,6 +3978,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+ dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
+
++ err = init_srcu_struct(&dev->odp_srcu);
++ if (err)
++ goto err_mp;
++
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+@@ -3987,17 +3991,11 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
+-
+- err = init_srcu_struct(&dev->odp_srcu);
+- if (err)
+- goto err_mp;
+-
+ return 0;
+
+ err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+-
+- return -ENOMEM;
++ return err;
+ }
+
+ static int mlx5_ib_enable_driver(struct ib_device *dev)
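
The mlx5_ib_stage_init_init() reshuffle hoists init_srcu_struct() so a later failure can still unwind through err_mp, adds the missing mutex_destroy() to the cleanup stage, and stops clobbering the real error code with a hard-wired -ENOMEM. A sketch of the propagate-don't-overwrite unwind idiom (hypothetical setup helpers, not mlx5's):

#include <errno.h>
#include <stdio.h>

static int setup_srcu(void)    { return 0; }
static void cleanup_srcu(void) { }
static int setup_tables(void)  { return -EBUSY; }  /* pretend this fails */

static int stage_init(void)
{
        int err;

        err = setup_srcu();             /* hoisted: first thing acquired */
        if (err)
                return err;

        err = setup_tables();
        if (err)
                goto err_srcu;

        return 0;

err_srcu:
        cleanup_srcu();
        return err;     /* keep the real cause, not a blanket -ENOMEM */
}

int main(void)
{
        printf("stage_init() = %d\n", stage_init());    /* -EBUSY */
        return 0;
}
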
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 0cb7cc642d87d..bab40ad527dae 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2432,9 +2432,6 @@ static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_DRIVER:
+ case IB_QPT_GSI:
+- if (dev->profile == &raw_eth_profile)
+- goto out;
+- fallthrough;
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_UD:
+ case MLX5_IB_QPT_REG_UMR:
+@@ -2629,10 +2626,6 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ int create_flags = attr->create_flags;
+ bool cond;
+
+- if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
+- if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
+- return -EINVAL;
+-
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return (create_flags) ? -EINVAL : 0;
+
+@@ -4211,6 +4204,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ return 0;
+ }
+
++static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
++ struct mlx5_ib_qp *qp,
++ enum ib_qp_type qp_type)
++{
++ if (dev->profile != &raw_eth_profile)
++ return true;
++
++ if (qp_type == IB_QPT_RAW_PACKET || qp_type == MLX5_IB_QPT_REG_UMR)
++ return true;
++
++ /* Internal QP used for wc testing, with NOPs in wq */
++ if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
++ return true;
++
++ return false;
++}
++
+ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+ {
+@@ -4223,6 +4233,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int err = -EINVAL;
+ int port;
+
++ if (!mlx5_ib_modify_qp_allowed(dev, qp, ibqp->qp_type))
++ return -EOPNOTSUPP;
++
+ if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
+ return -EOPNOTSUPP;
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 943914c2a50c7..bce44502ab0ed 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -414,6 +414,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+
+ void rxe_loopback(struct sk_buff *skb)
+ {
++ if (skb->protocol == htons(ETH_P_IP))
++ skb_pull(skb, sizeof(struct iphdr));
++ else
++ skb_pull(skb, sizeof(struct ipv6hdr));
++
+ rxe_rcv(skb);
+ }
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index c9984a28eecc7..cb69a125e2806 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -9,21 +9,26 @@
+ #include "rxe.h"
+ #include "rxe_loc.h"
+
++/* check that QP matches packet opcode type and is in a valid state */
+ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+ {
++ unsigned int pkt_type;
++
+ if (unlikely(!qp->valid))
+ goto err1;
+
++ pkt_type = pkt->opcode & 0xe0;
++
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+- if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
++ if (unlikely(pkt_type != IB_OPCODE_RC)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ case IB_QPT_UC:
+- if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
++ if (unlikely(pkt_type != IB_OPCODE_UC)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+@@ -31,7 +36,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+- if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
++ if (unlikely(pkt_type != IB_OPCODE_UD)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+@@ -252,7 +257,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+
+ list_for_each_entry(mce, &mcg->qp_list, qp_list) {
+ qp = mce->qp;
+- pkt = SKB_TO_PKT(skb);
+
+ /* validate qp for incoming packet */
+ err = check_type_state(rxe, pkt, qp);
+@@ -264,12 +268,18 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ continue;
+
+ /* for all but the last qp create a new clone of the
+- * skb and pass to the qp.
++ * skb and pass to the qp. If an error occurs in the
++ * checks for the last qp in the list we need to
++ * free the skb since it hasn't been passed on to
++ * rxe_rcv_pkt() which would free it later.
+ */
+- if (mce->qp_list.next != &mcg->qp_list)
++ if (mce->qp_list.next != &mcg->qp_list) {
+ per_qp_skb = skb_clone(skb, GFP_ATOMIC);
+- else
++ } else {
+ per_qp_skb = skb;
++ /* show we have consumed the skb */
++ skb = NULL;
++ }
+
+ if (unlikely(!per_qp_skb))
+ continue;
+@@ -284,9 +294,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+
+ rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+
+- return;
+-
+ err1:
++ /* free skb if not consumed */
+ kfree_skb(skb);
+ }
+
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index adda789962196..368959ae9a8cc 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -653,7 +653,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
+ {
+ struct siw_sqe *orq_e = orq_get_tail(qp);
+
+- if (orq_e && READ_ONCE(orq_e->flags) == 0)
++ if (READ_ONCE(orq_e->flags) == 0)
+ return orq_e;
+
+ return NULL;
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index ee95cf29179d2..41c46dfaebf66 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -135,7 +135,7 @@ static struct {
+
+ static int siw_init_cpulist(void)
+ {
+- int i, num_nodes = num_possible_nodes();
++ int i, num_nodes = nr_node_ids;
+
+ memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
+
+diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
+index 875d36d4b1c61..ddb2e66f9f133 100644
+--- a/drivers/infiniband/sw/siw/siw_qp.c
++++ b/drivers/infiniband/sw/siw/siw_qp.c
+@@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk)
+
+ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
+ {
+- irq_size = roundup_pow_of_two(irq_size);
+- orq_size = roundup_pow_of_two(orq_size);
+-
+- qp->attrs.irq_size = irq_size;
+- qp->attrs.orq_size = orq_size;
+-
+- qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+- if (!qp->irq) {
+- siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size);
+- qp->attrs.irq_size = 0;
+- return -ENOMEM;
++ if (irq_size) {
++ irq_size = roundup_pow_of_two(irq_size);
++ qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
++ if (!qp->irq) {
++ qp->attrs.irq_size = 0;
++ return -ENOMEM;
++ }
+ }
+- qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+- if (!qp->orq) {
+- siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size);
+- qp->attrs.orq_size = 0;
+- qp->attrs.irq_size = 0;
+- vfree(qp->irq);
+- return -ENOMEM;
++ if (orq_size) {
++ orq_size = roundup_pow_of_two(orq_size);
++ qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
++ if (!qp->orq) {
++ qp->attrs.orq_size = 0;
++ qp->attrs.irq_size = 0;
++ vfree(qp->irq);
++ return -ENOMEM;
++ }
+ }
++ qp->attrs.irq_size = irq_size;
++ qp->attrs.orq_size = orq_size;
+ siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
+ return 0;
+ }
+@@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
+ if (ctrl & MPA_V2_RDMA_WRITE_RTR)
+ wqe->sqe.opcode = SIW_OP_WRITE;
+ else if (ctrl & MPA_V2_RDMA_READ_RTR) {
+- struct siw_sqe *rreq;
++ struct siw_sqe *rreq = NULL;
+
+ wqe->sqe.opcode = SIW_OP_READ;
+
+ spin_lock(&qp->orq_lock);
+
+- rreq = orq_get_free(qp);
++ if (qp->attrs.orq_size)
++ rreq = orq_get_free(qp);
+ if (rreq) {
+ siw_read_to_orq(rreq, &wqe->sqe);
+ qp->orq_put++;
+@@ -877,135 +878,88 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
+ rreq->num_sge = 1;
+ }
+
+-/*
+- * Must be called with SQ locked.
+- * To avoid complete SQ starvation by constant inbound READ requests,
+- * the active IRQ will not be served after qp->irq_burst, if the
+- * SQ has pending work.
+- */
+-int siw_activate_tx(struct siw_qp *qp)
++static int siw_activate_tx_from_sq(struct siw_qp *qp)
+ {
+- struct siw_sqe *irqe, *sqe;
++ struct siw_sqe *sqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+ int rv = 1;
+
+- irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
+-
+- if (irqe->flags & SIW_WQE_VALID) {
+- sqe = sq_get_next(qp);
+-
+- /*
+- * Avoid local WQE processing starvation in case
+- * of constant inbound READ request stream
+- */
+- if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
+- qp->irq_burst = 0;
+- goto skip_irq;
+- }
+- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+- wqe->wr_status = SIW_WR_QUEUED;
+-
+- /* start READ RESPONSE */
+- wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
+- wqe->sqe.flags = 0;
+- if (irqe->num_sge) {
+- wqe->sqe.num_sge = 1;
+- wqe->sqe.sge[0].length = irqe->sge[0].length;
+- wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
+- wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
+- } else {
+- wqe->sqe.num_sge = 0;
+- }
+-
+- /* Retain original RREQ's message sequence number for
+- * potential error reporting cases.
+- */
+- wqe->sqe.sge[1].length = irqe->sge[1].length;
+-
+- wqe->sqe.rkey = irqe->rkey;
+- wqe->sqe.raddr = irqe->raddr;
++ sqe = sq_get_next(qp);
++ if (!sqe)
++ return 0;
+
+- wqe->processed = 0;
+- qp->irq_get++;
++ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
++ wqe->wr_status = SIW_WR_QUEUED;
+
+- /* mark current IRQ entry free */
+- smp_store_mb(irqe->flags, 0);
++ /* First copy SQE to kernel private memory */
++ memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+
++ if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
++ rv = -EINVAL;
+ goto out;
+ }
+- sqe = sq_get_next(qp);
+- if (sqe) {
+-skip_irq:
+- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+- wqe->wr_status = SIW_WR_QUEUED;
+-
+- /* First copy SQE to kernel private memory */
+- memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+-
+- if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
++ if (wqe->sqe.flags & SIW_WQE_INLINE) {
++ if (wqe->sqe.opcode != SIW_OP_SEND &&
++ wqe->sqe.opcode != SIW_OP_WRITE) {
+ rv = -EINVAL;
+ goto out;
+ }
+- if (wqe->sqe.flags & SIW_WQE_INLINE) {
+- if (wqe->sqe.opcode != SIW_OP_SEND &&
+- wqe->sqe.opcode != SIW_OP_WRITE) {
+- rv = -EINVAL;
+- goto out;
+- }
+- if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
+- rv = -EINVAL;
+- goto out;
+- }
+- wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
+- wqe->sqe.sge[0].lkey = 0;
+- wqe->sqe.num_sge = 1;
++ if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
++ rv = -EINVAL;
++ goto out;
+ }
+- if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
+- /* A READ cannot be fenced */
+- if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
+- wqe->sqe.opcode ==
+- SIW_OP_READ_LOCAL_INV)) {
+- siw_dbg_qp(qp, "cannot fence read\n");
+- rv = -EINVAL;
+- goto out;
+- }
+- spin_lock(&qp->orq_lock);
++ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
++ wqe->sqe.sge[0].lkey = 0;
++ wqe->sqe.num_sge = 1;
++ }
++ if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
++ /* A READ cannot be fenced */
++ if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
++ wqe->sqe.opcode ==
++ SIW_OP_READ_LOCAL_INV)) {
++ siw_dbg_qp(qp, "cannot fence read\n");
++ rv = -EINVAL;
++ goto out;
++ }
++ spin_lock(&qp->orq_lock);
+
+- if (!siw_orq_empty(qp)) {
+- qp->tx_ctx.orq_fence = 1;
+- rv = 0;
+- }
+- spin_unlock(&qp->orq_lock);
++ if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
++ qp->tx_ctx.orq_fence = 1;
++ rv = 0;
++ }
++ spin_unlock(&qp->orq_lock);
+
+- } else if (wqe->sqe.opcode == SIW_OP_READ ||
+- wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+- struct siw_sqe *rreq;
++ } else if (wqe->sqe.opcode == SIW_OP_READ ||
++ wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
++ struct siw_sqe *rreq;
+
+- wqe->sqe.num_sge = 1;
++ if (unlikely(!qp->attrs.orq_size)) {
++ /* We negotiated not to send READ req's */
++ rv = -EINVAL;
++ goto out;
++ }
++ wqe->sqe.num_sge = 1;
+
+- spin_lock(&qp->orq_lock);
++ spin_lock(&qp->orq_lock);
+
+- rreq = orq_get_free(qp);
+- if (rreq) {
+- /*
+- * Make an immediate copy in ORQ to be ready
+- * to process loopback READ reply
+- */
+- siw_read_to_orq(rreq, &wqe->sqe);
+- qp->orq_put++;
+- } else {
+- qp->tx_ctx.orq_fence = 1;
+- rv = 0;
+- }
+- spin_unlock(&qp->orq_lock);
++ rreq = orq_get_free(qp);
++ if (rreq) {
++ /*
++ * Make an immediate copy in ORQ to be ready
++ * to process loopback READ reply
++ */
++ siw_read_to_orq(rreq, &wqe->sqe);
++ qp->orq_put++;
++ } else {
++ qp->tx_ctx.orq_fence = 1;
++ rv = 0;
+ }
+-
+- /* Clear SQE, can be re-used by application */
+- smp_store_mb(sqe->flags, 0);
+- qp->sq_get++;
+- } else {
+- rv = 0;
++ spin_unlock(&qp->orq_lock);
+ }
++
++ /* Clear SQE, can be re-used by application */
++ smp_store_mb(sqe->flags, 0);
++ qp->sq_get++;
+ out:
+ if (unlikely(rv < 0)) {
+ siw_dbg_qp(qp, "error %d\n", rv);
+@@ -1014,6 +968,65 @@ out:
+ return rv;
+ }
+
++/*
++ * Must be called with SQ locked.
++ * To avoid complete SQ starvation by constant inbound READ requests,
++ * the active IRQ will not be served after qp->irq_burst, if the
++ * SQ has pending work.
++ */
++int siw_activate_tx(struct siw_qp *qp)
++{
++ struct siw_sqe *irqe;
++ struct siw_wqe *wqe = tx_wqe(qp);
++
++ if (!qp->attrs.irq_size)
++ return siw_activate_tx_from_sq(qp);
++
++ irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
++
++ if (!(irqe->flags & SIW_WQE_VALID))
++ return siw_activate_tx_from_sq(qp);
++
++ /*
++ * Avoid local WQE processing starvation in case
++ * of constant inbound READ request stream
++ */
++ if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
++ qp->irq_burst = 0;
++ return siw_activate_tx_from_sq(qp);
++ }
++ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
++ wqe->wr_status = SIW_WR_QUEUED;
++
++ /* start READ RESPONSE */
++ wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
++ wqe->sqe.flags = 0;
++ if (irqe->num_sge) {
++ wqe->sqe.num_sge = 1;
++ wqe->sqe.sge[0].length = irqe->sge[0].length;
++ wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
++ wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
++ } else {
++ wqe->sqe.num_sge = 0;
++ }
++
++ /* Retain original RREQ's message sequence number for
++ * potential error reporting cases.
++ */
++ wqe->sqe.sge[1].length = irqe->sge[1].length;
++
++ wqe->sqe.rkey = irqe->rkey;
++ wqe->sqe.raddr = irqe->raddr;
++
++ wqe->processed = 0;
++ qp->irq_get++;
++
++ /* mark current IRQ entry free */
++ smp_store_mb(irqe->flags, 0);
++
++ return 1;
++}
++
+ /*
+ * Check if current CQ state qualifies for calling CQ completion
+ * handler. Must be called with CQ lock held.
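
Every ORQ/IRQ access in siw is now gated on the queue size because expressions like qp->irq[qp->irq_get % qp->attrs.irq_size] divide by zero (and dereference a NULL ring) once zero-sized queues are legal. The queues are classic free-running-counter rings whose power-of-two size lets the modulo become a mask; a standalone sketch of that discipline, including the new size guard (illustrative types, not siw's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
        uint32_t *slot;         /* NULL when size == 0 */
        uint32_t size;          /* 0, or a power of two */
        uint32_t get, put;      /* free-running; wraparound is harmless */
};

/* Return the next free slot, or NULL if the ring is absent or full. */
static uint32_t *ring_get_free(struct ring *r)
{
        if (!r->size)                   /* the guard this patch adds */
                return NULL;
        if (r->put - r->get == r->size) /* full */
                return NULL;
        return &r->slot[r->put++ & (r->size - 1)];
}

int main(void)
{
        struct ring none = { NULL, 0, 0, 0 };   /* negotiated: no READs */
        struct ring four = { calloc(4, sizeof(uint32_t)), 4, 0, 0 };

        printf("empty ring: %p\n", (void *)ring_get_free(&none));
        for (int i = 0; i < 5; i++)             /* fifth call: ring is full */
                printf("slot %d: %p\n", i, (void *)ring_get_free(&four));
        free(four.slot);
        return 0;
}
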
+diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
+index 4bd1f1f84057b..60116f20653c7 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
+@@ -680,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+ }
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
++ if (unlikely(!qp->attrs.irq_size)) {
++ run_sq = 0;
++ goto error_irq;
++ }
+ if (tx_work->wr_status == SIW_WR_IDLE) {
+ /*
+ * immediately schedule READ response w/o
+@@ -712,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+ /* RRESP now valid as current TX wqe or placed into IRQ */
+ smp_store_mb(resp->flags, SIW_WQE_VALID);
+ } else {
+- pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp),
+- qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
++error_irq:
++ pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
++ qp_id(qp), qp->attrs.irq_size);
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_REMOTE_OPERATION,
+@@ -740,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp)
+ struct siw_sqe *orqe;
+ struct siw_wqe *wqe = NULL;
+
++ if (unlikely(!qp->attrs.orq_size))
++ return -EPROTO;
++
+ /* make sure ORQ indices are current */
+ smp_mb();
+
+@@ -796,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp)
+ */
+ rv = siw_orqe_start_rx(qp);
+ if (rv) {
+- pr_warn("siw: [QP %u]: ORQ empty at idx %d\n",
+- qp_id(qp), qp->orq_get % qp->attrs.orq_size);
++ pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
++ qp_id(qp), qp->attrs.orq_size);
+ goto error_term;
+ }
+ rv = siw_rresp_check_ntoh(srx, frx);
+@@ -1290,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
+ wc_status);
+ siw_wqe_put_mem(wqe, SIW_OP_READ);
+
+- if (!error)
++ if (!error) {
+ rv = siw_check_tx_fence(qp);
+- else
+- /* Disable current ORQ eleement */
+- WRITE_ONCE(orq_get_current(qp)->flags, 0);
++ } else {
++ /* Disable current ORQ element */
++ if (qp->attrs.orq_size)
++ WRITE_ONCE(orq_get_current(qp)->flags, 0);
++ }
+ break;
+
+ case RDMAP_RDMA_READ_REQ:
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index d19d8325588b5..7989c4043db4e 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -1107,8 +1107,8 @@ next_wqe:
+ /*
+ * RREQ may have already been completed by inbound RRESP!
+ */
+- if (tx_type == SIW_OP_READ ||
+- tx_type == SIW_OP_READ_LOCAL_INV) {
++ if ((tx_type == SIW_OP_READ ||
++ tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
+ /* Cleanup pending entry in ORQ */
+ qp->orq_put--;
+ qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 68fd053fc7748..e389d44e5591d 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -365,13 +365,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ if (rv)
+ goto err_out;
+
++ num_sqe = attrs->cap.max_send_wr;
++ num_rqe = attrs->cap.max_recv_wr;
++
+ /* All queue indices are derived from modulo operations
+ * on a free running 'get' (consumer) and 'put' (producer)
+ * unsigned counter. Having queue sizes at power of two
+ * avoids handling counter wrap around.
+ */
+- num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
+- num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
++ if (num_sqe)
++ num_sqe = roundup_pow_of_two(num_sqe);
++ else {
++ /* Zero sized SQ is not supported */
++ rv = -EINVAL;
++ goto err_out;
++ }
++ if (num_rqe)
++ num_rqe = roundup_pow_of_two(num_rqe);
+
+ if (udata)
+ qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+@@ -379,7 +389,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
+
+ if (qp->sendq == NULL) {
+- siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+@@ -413,7 +422,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
+
+ if (qp->recvq == NULL) {
+- siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+@@ -966,9 +974,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ unsigned long flags;
+ int rv = 0;
+
+- if (qp->srq) {
++ if (qp->srq || qp->attrs.rq_size == 0) {
+ *bad_wr = wr;
+- return -EOPNOTSUPP; /* what else from errno.h? */
++ return -EINVAL;
+ }
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+index ba00f0de14caa..ad77659800cd2 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+@@ -408,6 +408,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
++ kobject_put(&sess->kobj);
+ return err;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+@@ -419,6 +420,7 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+ &sess->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
++ kobject_put(&sess->stats->kobj_stats);
+ goto remove_group;
+ }
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+index 67f86c405a265..785cd1cf2a402 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+@@ -31,6 +31,8 @@
+ */
+ #define RTRS_RECONNECT_SEED 8
+
++#define FIRST_CONN 0x01
++
+ MODULE_DESCRIPTION("RDMA Transport Client");
+ MODULE_LICENSE("GPL");
+
+@@ -1511,7 +1513,7 @@ static void destroy_con(struct rtrs_clt_con *con)
+ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ {
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+- u16 wr_queue_size;
++ u32 max_send_wr, max_recv_wr, cq_size;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+@@ -1523,7 +1525,8 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+ * + 2 for drain and heartbeat
+ * in case qp gets into error state
+ */
+- wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
++ max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
++ max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+ /* We must be the first here */
+ if (WARN_ON(sess->s.dev))
+ return -EINVAL;
+@@ -1555,25 +1558,29 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
+
+ /* Shared between connections */
+ sess->s.dev_ref++;
+- wr_queue_size =
++ max_send_wr =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ sess->queue_depth * 3 + 1);
++ max_recv_wr =
++ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
++ sess->queue_depth * 3 + 1);
+ }
+ /* alloc iu to recv new rkey reply when server reports flags set */
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+- con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
++ con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+- con->queue_size = wr_queue_size;
++ con->queue_size = max_recv_wr;
+ }
++ cq_size = max_send_wr + max_recv_wr;
+ cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
+ err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
+- cq_vector, wr_queue_size, wr_queue_size,
+- IB_POLL_SOFTIRQ);
++ cq_vector, cq_size, max_send_wr,
++ max_recv_wr, IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+@@ -1657,6 +1664,7 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+ .cid_num = cpu_to_le16(sess->s.con_num),
+ .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ };
++ msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
+ uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+@@ -1742,6 +1750,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ scnprintf(sess->hca_name, sizeof(sess->hca_name),
+ sess->s.dev->ib_dev->name);
+ sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
++ /* set for_new_clt, to allow future reconnect on any path */
++ sess->for_new_clt = 1;
+ }
+
+ return 0;
+@@ -2565,11 +2575,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ err = dev_set_name(&clt->dev, "%s", sessname);
+- if (err) {
+- free_percpu(clt->pcpu_path);
+- kfree(clt);
+- return ERR_PTR(err);
+- }
++ if (err)
++ goto err;
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+@@ -2577,29 +2584,31 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_register(&clt->dev);
+ if (err) {
+- free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+- return ERR_PTR(err);
++ goto err;
+ }
+
+ clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ if (!clt->kobj_paths) {
+- free_percpu(clt->pcpu_path);
+- device_unregister(&clt->dev);
+- return NULL;
++ err = -ENOMEM;
++ goto err_dev;
+ }
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+- free_percpu(clt->pcpu_path);
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+- device_unregister(&clt->dev);
+- return ERR_PTR(err);
++ goto err_dev;
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
++err_dev:
++ device_unregister(&clt->dev);
++err:
++ free_percpu(clt->pcpu_path);
++ kfree(clt);
++ return ERR_PTR(err);
+ }
+
+ static void wait_for_inflight_permits(struct rtrs_clt *clt)
+@@ -2672,6 +2681,8 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ err = PTR_ERR(sess);
+ goto close_all_sess;
+ }
++ if (!i)
++ sess->for_new_clt = 1;
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+
+ err = init_sess(sess);
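
Splitting the old wr_queue_size into max_send_wr and max_recv_wr lets create_con_cq_qp() budget each direction separately and size the CQ as their sum, since send and receive completions share one CQ. The arithmetic in miniature (the constants are placeholders, not rtrs defaults):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int svc_depth = 512;    /* placeholder service-queue depth */
        unsigned int queue_depth = 1024; /* placeholder negotiated I/O depth */
        unsigned int max_qp_wr = 16384;  /* placeholder device limit */

        /* Service connection: REQ + RSP per slot, plus drain + heartbeat. */
        unsigned int svc_send = svc_depth * 2 + 2;
        unsigned int svc_recv = svc_depth * 2 + 2;

        /* I/O connection: QD * (REQ + RSP + FR REGS or INVS) + drain,
         * clamped to what the HCA can actually queue. */
        unsigned int io_send = min_u(max_qp_wr, queue_depth * 3 + 1);
        unsigned int io_recv = min_u(max_qp_wr, queue_depth * 3 + 1);

        /* One CQ absorbs completions from both directions. */
        printf("service CQ %u, I/O CQ %u\n",
               svc_send + svc_recv, io_send + io_recv);
        return 0;
}
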
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+index b8dbd701b3cb2..7c9e155027969 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+@@ -143,6 +143,7 @@ struct rtrs_clt_sess {
+ int max_send_sge;
+ u32 flags;
+ struct kobject kobj;
++ u8 for_new_clt;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+index 3f2918671dbed..8caad0a2322bf 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
++++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+@@ -188,7 +188,9 @@ struct rtrs_msg_conn_req {
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+- u8 reserved[12];
++ u8 first_conn : 1;
++ u8 reserved_bits : 7;
++ u8 reserved[11];
+ };
+
+ /**
+@@ -303,8 +305,9 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ struct ib_send_wr *head);
+
+ int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
+- u32 max_send_sge, int cq_vector, u16 cq_size,
+- u16 wr_queue_size, enum ib_poll_context poll_ctx);
++ u32 max_send_sge, int cq_vector, int cq_size,
++ u32 max_send_wr, u32 max_recv_wr,
++ enum ib_poll_context poll_ctx);
+ void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+ void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
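
The first_conn flag is carved out of the reserved area so struct rtrs_msg_conn_req keeps its on-wire size: one reserved byte becomes a 1-bit flag plus 7 reserved bits, and the array shrinks from 12 to 11 bytes. For a wire structure it is worth pinning that invariant at compile time; a hedged sketch covering just the tail of the message (the static_assert is an addition, not part of the patch):

#include <assert.h>
#include <stdint.h>

struct conn_req_tail {                  /* tail of the connect request only */
        uint16_t recon_cnt;
        uint8_t sess_uuid[16];
        uint8_t paths_uuid[16];
        uint8_t first_conn : 1;         /* new flag in the old byte 0 */
        uint8_t reserved_bits : 7;      /* rest of that byte, still reserved */
        uint8_t reserved[11];           /* was reserved[12] */
};

/* 2 + 16 + 16 + 1 + 11 = 46 bytes, exactly as before the change. */
static_assert(sizeof(struct conn_req_tail) == 46,
              "wire format must not grow or shrink");

int main(void) { return 0; }
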
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+index d2edff3b8f0df..126a96e75c621 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+@@ -51,6 +51,8 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
++ /* first remove sysfs itself to avoid deadlock */
++ sysfs_remove_file_self(&sess->kobj, &attr->attr);
+ close_sess(sess);
+
+ return count;
+@@ -181,6 +183,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_del(&srv->dev);
++ put_device(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+@@ -206,6 +209,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_del(&srv->dev);
++ put_device(&srv->dev);
+ } else {
+ mutex_unlock(&srv->paths_mutex);
+ }
+@@ -234,6 +238,7 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+ &sess->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
++ kobject_put(&sess->stats->kobj_stats);
+ return err;
+ }
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+@@ -290,8 +295,8 @@ remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ put_kobj:
+ kobject_del(&sess->kobj);
+- kobject_put(&sess->kobj);
+ destroy_root:
++ kobject_put(&sess->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+
+ return err;
+@@ -302,7 +307,7 @@ void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+- kobject_del(&sess->kobj);
++ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ kobject_put(&sess->kobj);
+
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
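
The sysfs fixes above all apply one kobject rule: once kobject_init_and_add() has run, even unsuccessfully, the object's lifetime belongs to the refcount, so an error must be answered with kobject_put(), which invokes the release callback, never with a bare kfree(). The idiom against a toy refcounted object (hypothetical helpers, not the kobject API):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
        void (*release)(struct obj *);
};

static void obj_put(struct obj *o)
{
        if (--o->refs == 0)
                o->release(o);  /* release callback owns the free() */
}

/* Takes the first reference, then fails partway, like a failing
 * kobject_init_and_add(). */
static int obj_init_and_add(struct obj *o, void (*release)(struct obj *))
{
        o->refs = 1;
        o->release = release;
        return -1;              /* e.g. the sysfs directory add failed */
}

static void obj_release(struct obj *o)
{
        printf("release callback ran\n");
        free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        if (obj_init_and_add(o, obj_release)) {
                obj_put(o);     /* NOT free(o): the refcount owns it now */
                return 1;
        }
        return 0;
}
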
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index c42fd470c4eb4..3850d2a938f8e 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -222,7 +222,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct rtrs_srv *srv = sess->srv;
+- struct ib_send_wr inv_wr, imm_wr;
++ struct ib_send_wr inv_wr;
++ struct ib_rdma_wr imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+@@ -267,21 +268,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ WARN_ON_ONCE(rkey != wr->rkey);
+
+ wr->wr.opcode = IB_WR_RDMA_WRITE;
++ wr->wr.wr_cqe = &io_comp_cqe;
+ wr->wr.ex.imm_data = 0;
+ wr->wr.send_flags = 0;
+
+ if (need_inval && always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &inv_wr;
+- inv_wr.next = &imm_wr;
++ inv_wr.next = &imm_wr.wr;
+ } else if (always_invalidate) {
+ wr->wr.next = &rwr.wr;
+- rwr.wr.next = &imm_wr;
++ rwr.wr.next = &imm_wr.wr;
+ } else if (need_inval) {
+ wr->wr.next = &inv_wr;
+- inv_wr.next = &imm_wr;
++ inv_wr.next = &imm_wr.wr;
+ } else {
+- wr->wr.next = &imm_wr;
++ wr->wr.next = &imm_wr.wr;
+ }
+ /*
+ * From time to time we have to post signaled sends,
+@@ -294,16 +296,18 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
++ inv_wr.wr_cqe = &io_comp_cqe;
+ inv_wr.send_flags = 0;
+ inv_wr.ex.invalidate_rkey = rkey;
+ }
+
+- imm_wr.next = NULL;
++ imm_wr.wr.next = NULL;
+ if (always_invalidate) {
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.opcode = IB_WR_REG_MR;
++ rwr.wr.wr_cqe = &local_reg_cqe;
+ rwr.wr.num_sge = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.wr.send_flags = 0;
+@@ -318,22 +322,22 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+- imm_wr.sg_list = &list;
+- imm_wr.num_sge = 1;
+- imm_wr.opcode = IB_WR_SEND_WITH_IMM;
++ imm_wr.wr.sg_list = &list;
++ imm_wr.wr.num_sge = 1;
++ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+- imm_wr.sg_list = NULL;
+- imm_wr.num_sge = 0;
+- imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
++ imm_wr.wr.sg_list = NULL;
++ imm_wr.wr.num_sge = 0;
++ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+- imm_wr.send_flags = flags;
+- imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
++ imm_wr.wr.send_flags = flags;
++ imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 0, need_inval));
+
+- imm_wr.wr_cqe = &io_comp_cqe;
++ imm_wr.wr.wr_cqe = &io_comp_cqe;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ offset, DMA_BIDIRECTIONAL);
+
+@@ -360,7 +364,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ {
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+- struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
++ struct ib_send_wr inv_wr, *wr = NULL;
++ struct ib_rdma_wr imm_wr;
+ struct ib_reg_wr rwr;
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_mr *srv_mr;
+@@ -379,6 +384,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+
+ if (need_inval) {
+ if (likely(sg_cnt)) {
++ inv_wr.wr_cqe = &io_comp_cqe;
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+@@ -396,15 +402,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ if (need_inval && always_invalidate) {
+ wr = &inv_wr;
+ inv_wr.next = &rwr.wr;
+- rwr.wr.next = &imm_wr;
++ rwr.wr.next = &imm_wr.wr;
+ } else if (always_invalidate) {
+ wr = &rwr.wr;
+- rwr.wr.next = &imm_wr;
++ rwr.wr.next = &imm_wr.wr;
+ } else if (need_inval) {
+ wr = &inv_wr;
+- inv_wr.next = &imm_wr;
++ inv_wr.next = &imm_wr.wr;
+ } else {
+- wr = &imm_wr;
++ wr = &imm_wr.wr;
+ }
+ /*
+ * From time to time we have to post signalled sends,
+@@ -413,14 +419,15 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+- imm_wr.next = NULL;
++ imm_wr.wr.next = NULL;
+ if (always_invalidate) {
+ struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+- rwr.wr.next = &imm_wr;
++ rwr.wr.next = &imm_wr.wr;
+ rwr.wr.opcode = IB_WR_REG_MR;
++ rwr.wr.wr_cqe = &local_reg_cqe;
+ rwr.wr.num_sge = 0;
+ rwr.wr.send_flags = 0;
+ rwr.mr = srv_mr->mr;
+@@ -435,21 +442,21 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+- imm_wr.sg_list = &list;
+- imm_wr.num_sge = 1;
+- imm_wr.opcode = IB_WR_SEND_WITH_IMM;
++ imm_wr.wr.sg_list = &list;
++ imm_wr.wr.num_sge = 1;
++ imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+- imm_wr.sg_list = NULL;
+- imm_wr.num_sge = 0;
+- imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
++ imm_wr.wr.sg_list = NULL;
++ imm_wr.wr.num_sge = 0;
++ imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+- imm_wr.send_flags = flags;
+- imm_wr.wr_cqe = &io_comp_cqe;
++ imm_wr.wr.send_flags = flags;
++ imm_wr.wr.wr_cqe = &io_comp_cqe;
+
+- imm_wr.ex.imm_data = cpu_to_be32(imm);
++ imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
+
+ err = ib_post_send(id->con->c.qp, wr, NULL);
+ if (unlikely(err))
+@@ -651,7 +658,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
+ if (!srv_mr->iu) {
+ err = -ENOMEM;
+ rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+- goto free_iu;
++ goto dereg_mr;
+ }
+ }
+ /* Eventually dma addr for each chunk can be cached */
+@@ -667,7 +674,6 @@ err:
+ srv_mr = &sess->mrs[mri];
+ sgt = &srv_mr->sgt;
+ mr = srv_mr->mr;
+-free_iu:
+ rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ dereg_mr:
+ ib_dereg_mr(mr);
+@@ -1328,7 +1334,8 @@ static void free_srv(struct rtrs_srv *srv)
+ }
+
+ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+- const uuid_t *paths_uuid)
++ const uuid_t *paths_uuid,
++ bool first_conn)
+ {
+ struct rtrs_srv *srv;
+ int i;
+@@ -1341,13 +1348,18 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ return srv;
+ }
+ }
++ mutex_unlock(&ctx->srv_mutex);
++ /*
++ * If this request is not the first connection request from the
++ * client for this session then fail and return error.
++ */
++ if (!first_conn)
++ return ERR_PTR(-ENXIO);
+
+ /* need to allocate a new srv */
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+- if (!srv) {
+- mutex_unlock(&ctx->srv_mutex);
+- return NULL;
+- }
++ if (!srv)
++ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&srv->paths_list);
+ mutex_init(&srv->paths_mutex);
+@@ -1357,8 +1369,6 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ srv->ctx = ctx;
+ device_initialize(&srv->dev);
+ srv->dev.release = rtrs_srv_dev_release;
+- list_add(&srv->ctx_list, &ctx->srv_list);
+- mutex_unlock(&ctx->srv_mutex);
+
+ srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ GFP_KERNEL);
+@@ -1371,6 +1381,9 @@ static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ goto err_free_chunks;
+ }
+ refcount_set(&srv->refcount, 1);
++ mutex_lock(&ctx->srv_mutex);
++ list_add(&srv->ctx_list, &ctx->srv_list);
++ mutex_unlock(&ctx->srv_mutex);
+
+ return srv;
+
+@@ -1381,7 +1394,7 @@ err_free_chunks:
+
+ err_free_srv:
+ kfree(srv);
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+ }
+
+ static void put_srv(struct rtrs_srv *srv)
+@@ -1461,10 +1474,12 @@ static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+
+ static void free_sess(struct rtrs_srv_sess *sess)
+ {
+- if (sess->kobj.state_in_sysfs)
++ if (sess->kobj.state_in_sysfs) {
++ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+- else
++ } else {
+ kfree(sess);
++ }
+ }
+
+ static void rtrs_srv_close_work(struct work_struct *work)
+@@ -1586,7 +1601,7 @@ static int create_con(struct rtrs_srv_sess *sess,
+ struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_con *con;
+
+- u16 cq_size, wr_queue_size;
++ u32 cq_size, wr_queue_size;
+ int err, cq_vector;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+@@ -1600,7 +1615,7 @@ static int create_con(struct rtrs_srv_sess *sess,
+ con->c.cm_id = cm_id;
+ con->c.sess = &sess->s;
+ con->c.cid = cid;
+- atomic_set(&con->wr_cnt, 0);
++ atomic_set(&con->wr_cnt, 1);
+
+ if (con->c.cid == 0) {
+ /*
+@@ -1630,7 +1645,8 @@ static int create_con(struct rtrs_srv_sess *sess,
+
+ /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+- wr_queue_size, IB_POLL_WORKQUEUE);
++ wr_queue_size, wr_queue_size,
++ IB_POLL_WORKQUEUE);
+ if (err) {
+ rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ goto free_con;
+@@ -1781,13 +1797,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ goto reject_w_econnreset;
+ }
+ recon_cnt = le16_to_cpu(msg->recon_cnt);
+- srv = get_or_create_srv(ctx, &msg->paths_uuid);
+- /*
+- * "refcount == 0" happens if a previous thread calls get_or_create_srv
+- * allocate srv, but chunks of srv are not allocated yet.
+- */
+- if (!srv || refcount_read(&srv->refcount) == 0) {
+- err = -ENOMEM;
++ srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
++ if (IS_ERR(srv)) {
++ err = PTR_ERR(srv);
+ goto reject_w_err;
+ }
+ mutex_lock(&srv->paths_mutex);
+@@ -1862,8 +1874,8 @@ reject_w_econnreset:
+ return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
+
+ close_and_return_err:
+- close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
++ close_sess(sess);
+
+ return err;
+ }
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
+index 2e3a849e0a77c..da4ff764dd3f0 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs.c
+@@ -182,16 +182,16 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head)
+ {
+- struct ib_send_wr wr;
++ struct ib_rdma_wr wr;
+
+- wr = (struct ib_send_wr) {
+- .wr_cqe = cqe,
+- .send_flags = flags,
+- .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+- .ex.imm_data = cpu_to_be32(imm_data),
++ wr = (struct ib_rdma_wr) {
++ .wr.wr_cqe = cqe,
++ .wr.send_flags = flags,
++ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
++ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ };
+
+- return rtrs_post_send(con->qp, head, &wr);
++ return rtrs_post_send(con->qp, head, &wr.wr);
+ }
+ EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
+
+@@ -231,14 +231,14 @@ static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+ }
+
+ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+- u16 wr_queue_size, u32 max_sge)
++ u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
+ {
+ struct ib_qp_init_attr init_attr = {NULL};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+- init_attr.cap.max_send_wr = wr_queue_size;
+- init_attr.cap.max_recv_wr = wr_queue_size;
++ init_attr.cap.max_send_wr = max_send_wr;
++ init_attr.cap.max_recv_wr = max_recv_wr;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.event_handler = qp_event_handler;
+ init_attr.qp_context = con;
+@@ -260,8 +260,9 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ }
+
+ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+- u32 max_send_sge, int cq_vector, u16 cq_size,
+- u16 wr_queue_size, enum ib_poll_context poll_ctx)
++ u32 max_send_sge, int cq_vector, int cq_size,
++ u32 max_send_wr, u32 max_recv_wr,
++ enum ib_poll_context poll_ctx)
+ {
+ int err;
+
+@@ -269,7 +270,8 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ if (err)
+ return err;
+
+- err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
++ err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
++ max_send_sge);
+ if (err) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
+index a2b5fbba2d3b3..430dc69750048 100644
+--- a/drivers/input/joydev.c
++++ b/drivers/input/joydev.c
+@@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
+ if (IS_ERR(abspam))
+ return PTR_ERR(abspam);
+
+- for (i = 0; i < joydev->nabs; i++) {
++ for (i = 0; i < len && i < joydev->nabs; i++) {
+ if (abspam[i] > ABS_MAX) {
+ retval = -EINVAL;
+ goto out;
+@@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ int i;
+ int retval = 0;
+
++ if (len % sizeof(*keypam))
++ return -EINVAL;
++
+ len = min(len, sizeof(joydev->keypam));
+
+ /* Validate the map. */
+@@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ if (IS_ERR(keypam))
+ return PTR_ERR(keypam);
+
+- for (i = 0; i < joydev->nkey; i++) {
++ for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
+ if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
+ retval = -EINVAL;
+ goto out;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 8cc8ca4a9ac01..9f0d07dcbf06b 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -305,6 +305,7 @@ static const struct xpad_device {
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
+index 2b321c17054ad..94eab82086b27 100644
+--- a/drivers/input/keyboard/Kconfig
++++ b/drivers/input/keyboard/Kconfig
+@@ -446,7 +446,7 @@ config KEYBOARD_MPR121
+
+ config KEYBOARD_SNVS_PWRKEY
+ tristate "IMX SNVS Power Key Driver"
+- depends on ARCH_MXC || COMPILE_TEST
++ depends on ARCH_MXC || (COMPILE_TEST && HAS_IOMEM)
+ depends on OF
+ help
+ This is the snvs powerkey driver for the Freescale i.MX application
+diff --git a/drivers/input/misc/da7280.c b/drivers/input/misc/da7280.c
+index 37568b00873d4..b08610d6e575e 100644
+--- a/drivers/input/misc/da7280.c
++++ b/drivers/input/misc/da7280.c
+@@ -863,6 +863,7 @@ static void da7280_parse_properties(struct device *dev,
+ gpi_str3[7] = '0' + i;
+ haptics->gpi_ctl[i].polarity = 0;
+ error = device_property_read_string(dev, gpi_str3, &str);
++ if (!error)
+ haptics->gpi_ctl[i].polarity =
+ da7280_haptic_of_gpi_pol_str(dev, str);
+ }
+@@ -1299,11 +1300,13 @@ static int __maybe_unused da7280_resume(struct device *dev)
+ return retval;
+ }
+
++#ifdef CONFIG_OF
+ static const struct of_device_id da7280_of_match[] = {
+ { .compatible = "dlg,da7280", },
+ { }
+ };
+ MODULE_DEVICE_TABLE(of, da7280_of_match);
++#endif
+
+ static const struct i2c_device_id da7280_i2c_id[] = {
+ { "da7280", },
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index c74b020796a94..9119e12a57784 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -588,6 +588,11 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
++ }, {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
++ },
+ },
+ { }
+ };
+diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
+index 8ac970a423de6..33e9d9bfd036f 100644
+--- a/drivers/input/serio/serport.c
++++ b/drivers/input/serio/serport.c
+@@ -156,7 +156,9 @@ out:
+ * returning 0 characters.
+ */
+
+-static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, unsigned char __user * buf, size_t nr)
++static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
++ unsigned char *kbuf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ struct serport *serport = (struct serport*) tty->disc_data;
+ struct serio *serio;
+diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
+index e0bacd34866ad..96173232e53fe 100644
+--- a/drivers/input/touchscreen/elo.c
++++ b/drivers/input/touchscreen/elo.c
+@@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
+ switch (elo->id) {
+
+ case 0: /* 10-byte protocol */
+- if (elo_setup_10(elo))
++ if (elo_setup_10(elo)) {
++ err = -EIO;
+ goto fail3;
++ }
+
+ break;
+
+diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
+index 603a948460d64..4d2d22a869773 100644
+--- a/drivers/input/touchscreen/raydium_i2c_ts.c
++++ b/drivers/input/touchscreen/raydium_i2c_ts.c
+@@ -445,6 +445,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
+ enum raydium_bl_ack state)
+ {
+ int error;
++ static const u8 cmd[] = { 0xFF, 0x39 };
+
+ error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
+ if (error) {
+@@ -453,7 +454,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
+ return error;
+ }
+
+- error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
++ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
+ if (error) {
+ dev_err(&client->dev, "Ack obj command failed: %d\n", error);
+ return error;
+diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
+index b4e7bcbe9b91d..6abae665ca71d 100644
+--- a/drivers/input/touchscreen/st1232.c
++++ b/drivers/input/touchscreen/st1232.c
+@@ -94,8 +94,13 @@ static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+
+ for (retries = 10; retries; retries--) {
+ error = st1232_ts_read_data(ts, REG_STATUS, 1);
+- if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+- return 0;
++ if (!error) {
++ switch (ts->read_buf[0]) {
++ case STATUS_NORMAL | ERROR_NONE:
++ case STATUS_IDLE | ERROR_NONE:
++ return 0;
++ }
++ }
+
+ usleep_range(1000, 2000);
+ }
+diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
+index 620cdd7d214a6..12f2562b0141b 100644
+--- a/drivers/input/touchscreen/sur40.c
++++ b/drivers/input/touchscreen/sur40.c
+@@ -787,6 +787,7 @@ static int sur40_probe(struct usb_interface *interface,
+ dev_err(&interface->dev,
+ "Unable to register video controls.");
+ v4l2_ctrl_handler_free(&sur40->hdl);
++ error = sur40->hdl.error;
+ goto err_unreg_v4l2;
+ }
+
+diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
+index a3e3adbabc673..b1548971d683e 100644
+--- a/drivers/input/touchscreen/zinitix.c
++++ b/drivers/input/touchscreen/zinitix.c
+@@ -190,7 +190,7 @@ static int zinitix_write_cmd(struct i2c_client *client, u16 reg)
+ return 0;
+ }
+
+-static bool zinitix_init_touch(struct bt541_ts_data *bt541)
++static int zinitix_init_touch(struct bt541_ts_data *bt541)
+ {
+ struct i2c_client *client = bt541->client;
+ int i;
+diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+index 8ca7415d785d9..c70d6e79f5346 100644
+--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+@@ -2280,7 +2280,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
+ {
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+- arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
++ arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start + 1,
+ gather->pgsize, true, smmu_domain);
+ }
+
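The arithmetic being corrected here: iommu_iotlb_gather records an inclusive end address, so the length of the range to invalidate is end - start + 1; the mtk_iommu hunk further down makes the same off-by-one fix. A standalone C sketch of inclusive-range length (struct range and range_len are illustrative, not kernel API):

    #include <assert.h>
    #include <stddef.h>

    /* An inclusive address range, mirroring iommu_iotlb_gather:
     * 'end' is the last byte covered, not one past it. */
    struct range {
        unsigned long start;
        unsigned long end;   /* inclusive */
    };

    static size_t range_len(const struct range *r)
    {
        /* Without the +1, one byte (in practice, one page) at the
         * end of the range would escape invalidation. */
        return r->end - r->start + 1;
    }

    int main(void)
    {
        /* One 4 KiB page at 0x1000: the last covered byte is 0x1fff. */
        struct range r = { .start = 0x1000, .end = 0x1fff };
        assert(range_len(&r) == 0x1000);
        return 0;
    }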
+diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+index bcda17012aee8..abb1d2f4ce301 100644
+--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
++++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+@@ -206,6 +206,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+ smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+ if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
++ /* Ignore valid bit for SMR mask extraction. */
++ smr &= ~ARM_SMMU_SMR_VALID;
+ smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+ smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+ smmu->smrs[i].valid = true;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index ffeebda8d6def..fd5f59373fc62 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -2426,9 +2426,6 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ size -= pgsize;
+ }
+
+- if (ops->iotlb_sync_map)
+- ops->iotlb_sync_map(domain);
+-
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ iommu_unmap(domain, orig_iova, orig_size - size);
+@@ -2438,18 +2435,31 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ return ret;
+ }
+
++static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
++ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
++{
++ const struct iommu_ops *ops = domain->ops;
++ int ret;
++
++ ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
++ if (ret == 0 && ops->iotlb_sync_map)
++ ops->iotlb_sync_map(domain);
++
++ return ret;
++}
++
+ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+ {
+ might_sleep();
+- return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
++ return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+ }
+ EXPORT_SYMBOL_GPL(iommu_map);
+
+ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+ {
+- return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
++ return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+ }
+ EXPORT_SYMBOL_GPL(iommu_map_atomic);
+
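The shape of this refactor: __iommu_map() no longer syncs, and a thin _iommu_map() wrapper issues ops->iotlb_sync_map() exactly once, only when the whole mapping succeeded (the sg variant below gains its own single sync). A userspace sketch of the wrap-and-sync-on-success pattern, with hypothetical names (do_map, core_map, sync_map):

    #include <stdio.h>

    /* Hypothetical core operation that may fail partway through. */
    static int core_map(int fail) { return fail ? -1 : 0; }

    /* Hypothetical flush hook, analogous to ops->iotlb_sync_map(). */
    static void sync_map(void) { puts("sync"); }

    /* Like _iommu_map(): the sync runs once, and only on success;
     * on failure the caller unwinds the partial mapping instead. */
    static int do_map(int fail)
    {
        int ret = core_map(fail);
        if (ret == 0)
            sync_map();
        return ret;
    }

    int main(void)
    {
        do_map(0);   /* prints "sync" */
        do_map(1);   /* no sync on the error path */
        return 0;
    }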
+@@ -2533,6 +2543,7 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
+ {
++ const struct iommu_ops *ops = domain->ops;
+ size_t len = 0, mapped = 0;
+ phys_addr_t start;
+ unsigned int i = 0;
+@@ -2563,6 +2574,8 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ sg = sg_next(sg);
+ }
+
++ if (ops->iotlb_sync_map)
++ ops->iotlb_sync_map(domain);
+ return mapped;
+
+ out_err:
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 8e56cec532e71..bfe6ec329f8d5 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -444,7 +444,7 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+ {
+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+- size_t length = gather->end - gather->start;
++ size_t length = gather->end - gather->start + 1;
+
+ if (gather->start == ULONG_MAX)
+ return;
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index b147f22a78f48..d7d1a0fab2c1a 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -457,7 +457,8 @@ config IMX_IRQSTEER
+ Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+
+ config IMX_INTMUX
+- def_bool y if ARCH_MXC || COMPILE_TEST
++ bool "i.MX INTMUX support" if COMPILE_TEST
++ default y if ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX INTMUX interrupt multiplexer.
+diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
+index 12aeeab432893..32562b7e681b5 100644
+--- a/drivers/irqchip/irq-loongson-pch-msi.c
++++ b/drivers/irqchip/irq-loongson-pch-msi.c
+@@ -225,7 +225,7 @@ static int pch_msi_init(struct device_node *node,
+ goto err_priv;
+ }
+
+- priv->msi_map = bitmap_alloc(priv->num_irqs, GFP_KERNEL);
++ priv->msi_map = bitmap_zalloc(priv->num_irqs, GFP_KERNEL);
+ if (!priv->msi_map) {
+ ret = -ENOMEM;
+ goto err_priv;
+diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
+index f94f974a87645..853b3972dbe78 100644
+--- a/drivers/irqchip/irq-ls-extirq.c
++++ b/drivers/irqchip/irq-ls-extirq.c
+@@ -64,7 +64,7 @@ static struct irq_chip ls_extirq_chip = {
+ .irq_set_type = ls_extirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+- .flags = IRQCHIP_SET_TYPE_MASKED,
++ .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ };
+
+ static int
+diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
+index 0ee3272491501..2633bc254935c 100644
+--- a/drivers/macintosh/adb-iop.c
++++ b/drivers/macintosh/adb-iop.c
+@@ -19,6 +19,7 @@
+ #include <asm/macints.h>
+ #include <asm/mac_iop.h>
+ #include <asm/adb_iop.h>
++#include <asm/unaligned.h>
+
+ #include <linux/adb.h>
+
+@@ -249,7 +250,7 @@ static void adb_iop_set_ap_complete(struct iop_msg *msg)
+ {
+ struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message;
+
+- autopoll_devs = (amsg->data[1] << 8) | amsg->data[0];
++ autopoll_devs = get_unaligned_be16(amsg->data);
+ if (autopoll_devs & (1 << autopoll_addr))
+ return;
+ autopoll_addr = autopoll_devs ? (ffs(autopoll_devs) - 1) : 0;
+@@ -266,8 +267,7 @@ static int adb_iop_autopoll(int devs)
+ amsg.flags = ADB_IOP_SET_AUTOPOLL | (mask ? ADB_IOP_AUTOPOLL : 0);
+ amsg.count = 2;
+ amsg.cmd = 0;
+- amsg.data[0] = mask & 0xFF;
+- amsg.data[1] = (mask >> 8) & 0xFF;
++ put_unaligned_be16(mask, amsg.data);
+
+ iop_send_message(ADB_IOP, ADB_CHAN, NULL, sizeof(amsg), (__u8 *)&amsg,
+ adb_iop_set_ap_complete);
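The two hunks above replace open-coded byte assembly with the unaligned helpers: the removed code treated amsg->data[0] as the low byte (little-endian), while the autopoll mask in the IOP message is big-endian, which is what get_unaligned_be16()/put_unaligned_be16() encode. A standalone sketch contrasting the two reads (read_le16/read_be16 are illustrative stand-ins for the kernel helpers):

    #include <assert.h>
    #include <stdint.h>

    /* Little-endian read: buf[0] is the low byte (the removed code). */
    static uint16_t read_le16(const uint8_t *buf)
    {
        return (uint16_t)((buf[1] << 8) | buf[0]);
    }

    /* Big-endian read: buf[0] is the high byte, matching the message
     * format and get_unaligned_be16(). */
    static uint16_t read_be16(const uint8_t *buf)
    {
        return (uint16_t)((buf[0] << 8) | buf[1]);
    }

    int main(void)
    {
        const uint8_t msg[2] = { 0x12, 0x34 };
        assert(read_le16(msg) == 0x3412);
        assert(read_be16(msg) == 0x1234);
        return 0;
    }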
+diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
+index 67fb10885bb4f..9f71de666e3f6 100644
+--- a/drivers/mailbox/arm_mhuv2.c
++++ b/drivers/mailbox/arm_mhuv2.c
+@@ -699,7 +699,9 @@ static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
+ ret = IRQ_HANDLED;
+ }
+
+- kfree(data);
++ if (!IS_ERR(data))
++ kfree(data);
++
+ return ret;
+ }
+
+diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
+index f6fab24ae8a9a..4c325301a2fe8 100644
+--- a/drivers/mailbox/sprd-mailbox.c
++++ b/drivers/mailbox/sprd-mailbox.c
+@@ -35,7 +35,7 @@
+ #define SPRD_MBOX_IRQ_CLR BIT(0)
+
+ /* Bit and mask definition for outbox's SPRD_MBOX_FIFO_STS register */
+-#define SPRD_OUTBOX_FIFO_FULL BIT(0)
++#define SPRD_OUTBOX_FIFO_FULL BIT(2)
+ #define SPRD_OUTBOX_FIFO_WR_SHIFT 16
+ #define SPRD_OUTBOX_FIFO_RD_SHIFT 24
+ #define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 1d57f48307e66..e8bf4f752e8be 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -1001,6 +1001,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
+
+ extern struct workqueue_struct *bcache_wq;
+ extern struct workqueue_struct *bch_journal_wq;
++extern struct workqueue_struct *bch_flush_wq;
+ extern struct mutex bch_register_lock;
+ extern struct list_head bch_cache_sets;
+
+@@ -1042,5 +1043,7 @@ void bch_debug_exit(void);
+ void bch_debug_init(void);
+ void bch_request_exit(void);
+ int bch_request_init(void);
++void bch_btree_exit(void);
++int bch_btree_init(void);
+
+ #endif /* _BCACHE_H */
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 910df242c83df..fe6dce125aba2 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -99,6 +99,8 @@
+ #define PTR_HASH(c, k) \
+ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+
++static struct workqueue_struct *btree_io_wq;
++
+ #define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+
+@@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl)
+ btree_complete_write(b, w);
+
+ if (btree_node_dirty(b))
+- schedule_delayed_work(&b->work, 30 * HZ);
++ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+
+ closure_return_with_destructor(cl, btree_node_write_unlock);
+ }
+@@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
+ BUG_ON(!i->keys);
+
+ if (!btree_node_dirty(b))
+- schedule_delayed_work(&b->work, 30 * HZ);
++ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+
+ set_btree_node_dirty(b);
+
+@@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf)
+ spin_lock_init(&buf->lock);
+ array_allocator_init(&buf->freelist);
+ }
++
++void bch_btree_exit(void)
++{
++ if (btree_io_wq)
++ destroy_workqueue(btree_io_wq);
++}
++
++int __init bch_btree_init(void)
++{
++ btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
++ if (!btree_io_wq)
++ return -ENOMEM;
++
++ return 0;
++}
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index aefbdb7e003bc..c6613e8173337 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -932,8 +932,8 @@ atomic_t *bch_journal(struct cache_set *c,
+ journal_try_write(c);
+ } else if (!w->dirty) {
+ w->dirty = true;
+- schedule_delayed_work(&c->journal.work,
+- msecs_to_jiffies(c->journal_delay_ms));
++ queue_delayed_work(bch_flush_wq, &c->journal.work,
++ msecs_to_jiffies(c->journal_delay_ms));
+ spin_unlock(&c->journal.lock);
+ } else {
+ spin_unlock(&c->journal.lock);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 2047a9cccdb5d..7457ec160c9a1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -49,6 +49,7 @@ static int bcache_major;
+ static DEFINE_IDA(bcache_device_idx);
+ static wait_queue_head_t unregister_wait;
+ struct workqueue_struct *bcache_wq;
++struct workqueue_struct *bch_flush_wq;
+ struct workqueue_struct *bch_journal_wq;
+
+
+@@ -2821,6 +2822,9 @@ static void bcache_exit(void)
+ destroy_workqueue(bcache_wq);
+ if (bch_journal_wq)
+ destroy_workqueue(bch_journal_wq);
++ if (bch_flush_wq)
++ destroy_workqueue(bch_flush_wq);
++ bch_btree_exit();
+
+ if (bcache_major)
+ unregister_blkdev(bcache_major, "bcache");
+@@ -2876,10 +2880,26 @@ static int __init bcache_init(void)
+ return bcache_major;
+ }
+
++ if (bch_btree_init())
++ goto err;
++
+ bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ if (!bcache_wq)
+ goto err;
+
++ /*
++ * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
++ *
++ * 1. It used `system_wq` before which also does no memory reclaim.
++ * 2. With `WQ_MEM_RECLAIM`, desktop stalls, increased boot times, and
++ * reduced throughput can be observed.
++ *
++ * We still want to use our own queue so as not to congest the `system_wq`.
++ */
++ bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
++ if (!bch_flush_wq)
++ goto err;
++
+ bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ if (!bch_journal_wq)
+ goto err;
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 086d293c2b036..2576c966a0096 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -102,6 +102,10 @@ struct mapped_device {
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;
+
++ int swap_bios;
++ struct semaphore swap_bios_semaphore;
++ struct mutex swap_bios_lock;
++
+ struct dm_stats stats;
+
+ /* for blk-mq request-based DM support */
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 5a55617a08e68..07aa619d36e7b 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3324,6 +3324,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ wake_up_process(cc->write_thread);
+
+ ti->num_flush_bios = 1;
++ ti->limit_swap_bios = true;
+
+ return 0;
+
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index b24e3839bb3a1..d9ac7372108c9 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -47,6 +47,7 @@ struct writeset {
+ static void writeset_free(struct writeset *ws)
+ {
+ vfree(ws->bits);
++ ws->bits = NULL;
+ }
+
+ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+@@ -71,8 +72,6 @@ static size_t bitset_size(unsigned nr_bits)
+ */
+ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ {
+- ws->md.nr_bits = nr_blocks;
+- ws->md.root = INVALID_WRITESET_ROOT;
+ ws->bits = vzalloc(bitset_size(nr_blocks));
+ if (!ws->bits) {
+ DMERR("%s: couldn't allocate in memory bitset", __func__);
+@@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ /*
+ * Wipes the in-core bitset, and creates a new on disk bitset.
+ */
+-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
++static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
++ dm_block_t nr_blocks)
+ {
+ int r;
+
+- memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
++ memset(ws->bits, 0, bitset_size(nr_blocks));
+
++ ws->md.nr_bits = nr_blocks;
+ r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
+ if (r) {
+ DMERR("%s: setup_on_disk_bitset failed", __func__);
+@@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
+ {
+ int r;
+
+- if (!test_and_set_bit(block, ws->bits)) {
++ if (!test_bit(block, ws->bits)) {
+ r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
+ if (r) {
+ /* FIXME: fail mode */
+@@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value)
+
+ static int ws_eq(void *context, const void *value1, const void *value2)
+ {
+- return !memcmp(value1, value2, sizeof(struct writeset_metadata));
++ return !memcmp(value1, value2, sizeof(struct writeset_disk));
+ }
+
+ /*----------------------------------------------------------------*/
+@@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md)
+ }
+
+ disk = dm_block_data(sblock);
++
++ /* Verify the data block size hasn't changed */
++ if (le32_to_cpu(disk->data_block_size) != md->block_size) {
++ DMERR("changing the data block size (from %u to %llu) is not supported",
++ le32_to_cpu(disk->data_block_size), md->block_size);
++ r = -EINVAL;
++ goto bad;
++ }
++
+ r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
+ disk->metadata_space_map_root,
+ sizeof(disk->metadata_space_map_root),
+@@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md)
+
+ setup_infos(md);
+
+- md->block_size = le32_to_cpu(disk->data_block_size);
+ md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+ md->current_era = le32_to_cpu(disk->current_era);
+
++ ws_unpack(&disk->current_writeset, &md->current_writeset->md);
+ md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
+ md->era_array_root = le64_to_cpu(disk->era_array_root);
+ md->metadata_snap = le64_to_cpu(disk->metadata_snap);
+@@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md,
+ ws_unpack(&disk, &d->writeset);
+ d->value = cpu_to_le32(key);
+
++ /*
++ * We initialise another bitset info to avoid any caching side effects
++ * with the previous one.
++ */
++ dm_disk_bitset_init(md->tm, &d->info);
++
+ d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
+ d->current_bit = 0;
+ d->step = metadata_digest_transcribe_writeset;
+@@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
+ return 0;
+
+ memset(d, 0, sizeof(*d));
+-
+- /*
+- * We initialise another bitset info to avoid any caching side
+- * effects with the previous one.
+- */
+- dm_disk_bitset_init(md->tm, &d->info);
+ d->step = metadata_digest_lookup_writeset;
+
+ return 0;
+@@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev,
+
+ static void metadata_close(struct era_metadata *md)
+ {
++ writeset_free(&md->writesets[0]);
++ writeset_free(&md->writesets[1]);
+ destroy_persistent_data_objects(md);
+ kfree(md);
+ }
+@@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg)
+ r = writeset_alloc(&md->writesets[1], *new_size);
+ if (r) {
+ DMERR("%s: writeset_alloc failed for writeset 1", __func__);
++ writeset_free(&md->writesets[0]);
+ return r;
+ }
+
+@@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg)
+ &value, &md->era_array_root);
+ if (r) {
+ DMERR("%s: dm_array_resize failed", __func__);
++ writeset_free(&md->writesets[0]);
++ writeset_free(&md->writesets[1]);
+ return r;
+ }
+
+@@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md)
+ }
+
+ ws_pack(&md->current_writeset->md, &value);
+- md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+
+ keys[0] = md->current_era;
+ __dm_bless_for_disk(&value);
+@@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md)
+ return r;
+ }
+
++ md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+ md->archived_writesets = true;
+
+ return 0;
+@@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md)
+ int r;
+ struct writeset *new_writeset = next_writeset(md);
+
+- r = writeset_init(&md->bitset_info, new_writeset);
++ r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
+ if (r) {
+ DMERR("%s: writeset_init failed", __func__);
+ return r;
+@@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md)
+ int r;
+ struct dm_block *sblock;
+
+- if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
++ if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
+ r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+ &md->current_writeset->md.root);
+ if (r) {
+@@ -1225,8 +1240,10 @@ static void process_deferred_bios(struct era *era)
+ int r;
+ struct bio_list deferred_bios, marked_bios;
+ struct bio *bio;
++ struct blk_plug plug;
+ bool commit_needed = false;
+ bool failed = false;
++ struct writeset *ws = era->md->current_writeset;
+
+ bio_list_init(&deferred_bios);
+ bio_list_init(&marked_bios);
+@@ -1236,9 +1253,11 @@ static void process_deferred_bios(struct era *era)
+ bio_list_init(&era->deferred_bios);
+ spin_unlock(&era->deferred_lock);
+
++ if (bio_list_empty(&deferred_bios))
++ return;
++
+ while ((bio = bio_list_pop(&deferred_bios))) {
+- r = writeset_test_and_set(&era->md->bitset_info,
+- era->md->current_writeset,
++ r = writeset_test_and_set(&era->md->bitset_info, ws,
+ get_block(era, bio));
+ if (r < 0) {
+ /*
+@@ -1246,7 +1265,6 @@ static void process_deferred_bios(struct era *era)
+ * FIXME: finish.
+ */
+ failed = true;
+-
+ } else if (r == 0)
+ commit_needed = true;
+
+@@ -1262,9 +1280,19 @@ static void process_deferred_bios(struct era *era)
+ if (failed)
+ while ((bio = bio_list_pop(&marked_bios)))
+ bio_io_error(bio);
+- else
+- while ((bio = bio_list_pop(&marked_bios)))
++ else {
++ blk_start_plug(&plug);
++ while ((bio = bio_list_pop(&marked_bios))) {
++ /*
++ * Only update the in-core writeset if the on-disk one
++ * was updated too.
++ */
++ if (commit_needed)
++ set_bit(get_block(era, bio), ws->bits);
+ submit_bio_noacct(bio);
++ }
++ blk_finish_plug(&plug);
++ }
+ }
+
+ static void process_rpc_calls(struct era *era)
+@@ -1473,15 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ }
+ era->md = md;
+
+- era->nr_blocks = calc_nr_blocks(era);
+-
+- r = metadata_resize(era->md, &era->nr_blocks);
+- if (r) {
+- ti->error = "couldn't resize metadata";
+- era_destroy(era);
+- return -ENOMEM;
+- }
+-
+ era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!era->wq) {
+ ti->error = "could not create workqueue for metadata object";
+@@ -1556,16 +1575,24 @@ static int era_preresume(struct dm_target *ti)
+ dm_block_t new_size = calc_nr_blocks(era);
+
+ if (era->nr_blocks != new_size) {
+- r = in_worker1(era, metadata_resize, &new_size);
+- if (r)
++ r = metadata_resize(era->md, &new_size);
++ if (r) {
++ DMERR("%s: metadata_resize failed", __func__);
++ return r;
++ }
++
++ r = metadata_commit(era->md);
++ if (r) {
++ DMERR("%s: metadata_commit failed", __func__);
+ return r;
++ }
+
+ era->nr_blocks = new_size;
+ }
+
+ start_worker(era);
+
+- r = in_worker0(era, metadata_new_era);
++ r = in_worker0(era, metadata_era_rollover);
+ if (r) {
+ DMERR("%s: metadata_era_rollover failed", __func__);
+ return r;
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 4acf2342f7adf..77086db8b9200 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -820,24 +820,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
+ EXPORT_SYMBOL_GPL(dm_table_set_type);
+
+ /* validate the dax capability of the target device span */
+-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
++int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+ {
+ int blocksize = *(int *) data, id;
+ bool rc;
+
+ id = dax_read_lock();
+- rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
++ rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+ dax_read_unlock(id);
+
+ return rc;
+ }
+
+ /* Check devices support synchronous DAX */
+-static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+- sector_t start, sector_t len, void *data)
++static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
+ {
+- return dev->dax_dev && dax_synchronous(dev->dax_dev);
++ return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
+ }
+
+ bool dm_table_supports_dax(struct dm_table *t,
+@@ -854,7 +854,7 @@ bool dm_table_supports_dax(struct dm_table *t,
+ return false;
+
+ if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, iterate_fn, blocksize))
++ ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ return false;
+ }
+
+@@ -925,7 +925,7 @@ static int dm_table_determine_type(struct dm_table *t)
+ verify_bio_based:
+ /* We must use this table as bio-based */
+ t->type = DM_TYPE_BIO_BASED;
+- if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
++ if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
+ t->type = DM_TYPE_DAX_BIO_BASED;
+ }
+@@ -1295,6 +1295,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
+ return &t->targets[(KEYS_PER_NODE * n) + k];
+ }
+
++/*
++ * type->iterate_devices() should be called when the sanity check needs to
++ * iterate and check all underlying data devices. iterate_devices() will
++ * iterate all underlying data devices until it encounters a non-zero return
++ * code, returned either by the input iterate_devices_callout_fn or by
++ * iterate_devices() itself internally.
++ *
++ * For some target types (e.g. dm-stripe), one call of iterate_devices() may
++ * iterate multiple underlying devices internally, in which case a non-zero
++ * return code returned by iterate_devices_callout_fn will stop the iteration
++ * in advance.
++ *
++ * Cases requiring _any_ underlying device supporting some kind of attribute
++ * should use the iteration structure like dm_table_any_dev_attr(), or call
++ * it directly. @func should handle semantics of positive examples, e.g.
++ * capable of something.
++ *
++ * Cases requiring _all_ underlying devices supporting some kind of attribute
++ * should use the iteration structure like dm_table_supports_nowait() or
++ * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that
++ * uses an @anti_func that handles semantics of counter examples, e.g. not
++ * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data);
++ */
++static bool dm_table_any_dev_attr(struct dm_table *t,
++ iterate_devices_callout_fn func, void *data)
++{
++ struct dm_target *ti;
++ unsigned int i;
++
++ for (i = 0; i < dm_table_get_num_targets(t); i++) {
++ ti = dm_table_get_target(t, i);
++
++ if (ti->type->iterate_devices &&
++ ti->type->iterate_devices(ti, func, data))
++ return true;
++ }
++
++ return false;
++}
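The helper above plus the inverted device_not_* callbacks implement the comment's closing identity: "all devices satisfy P" is the same as "no device satisfies not-P", so a single any-style iterator with negated predicates covers both the any- and all- cases. A small sketch of that De Morgan structure, with invented names (any_dev, is_rotational):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef bool (*pred_fn)(int dev);

    /* Analogue of dm_table_any_dev_attr(): true if any device matches. */
    static bool any_dev(const int *devs, size_t n, pred_fn pred)
    {
        for (size_t i = 0; i < n; i++)
            if (pred(devs[i]))
                return true;
        return false;
    }

    static bool is_rotational(int dev) { return dev == 1; }

    int main(void)
    {
        const int devs[] = { 0, 0, 0 };   /* all non-rotational */

        /* "all non-rotational" == "none rotational": note the negation,
         * exactly how device_is_rotational() is consumed in
         * dm_table_set_restrictions() below. */
        bool all_nonrot = !any_dev(devs, 3, is_rotational);
        assert(all_nonrot);
        return 0;
    }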
++
+ static int count_device(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+ {
+@@ -1331,13 +1371,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
+ return true;
+ }
+
+-static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+- sector_t start, sector_t len, void *data)
++static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
+ {
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ enum blk_zoned_model *zoned_model = data;
+
+- return q && blk_queue_zoned_model(q) == *zoned_model;
++ return !q || blk_queue_zoned_model(q) != *zoned_model;
+ }
+
+ static bool dm_table_supports_zoned_model(struct dm_table *t,
+@@ -1354,37 +1394,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
+ return false;
+
+ if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
++ ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
+ return false;
+ }
+
+ return true;
+ }
+
+-static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+- sector_t start, sector_t len, void *data)
++static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
+ {
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ unsigned int *zone_sectors = data;
+
+- return q && blk_queue_zone_sectors(q) == *zone_sectors;
+-}
+-
+-static bool dm_table_matches_zone_sectors(struct dm_table *t,
+- unsigned int zone_sectors)
+-{
+- struct dm_target *ti;
+- unsigned i;
+-
+- for (i = 0; i < dm_table_get_num_targets(t); i++) {
+- ti = dm_table_get_target(t, i);
+-
+- if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
+- return false;
+- }
+-
+- return true;
++ return !q || blk_queue_zone_sectors(q) != *zone_sectors;
+ }
+
+ static int validate_hardware_zoned_model(struct dm_table *table,
+@@ -1404,7 +1427,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
+ if (!zone_sectors || !is_power_of_2(zone_sectors))
+ return -EINVAL;
+
+- if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
++ if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
+ DMERR("%s: zone sectors is not consistent across all devices",
+ dm_device_name(table->md));
+ return -EINVAL;
+@@ -1578,29 +1601,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti,
+ return false;
+ }
+
+-static int dm_table_supports_dax_write_cache(struct dm_table *t)
+-{
+- struct dm_target *ti;
+- unsigned i;
+-
+- for (i = 0; i < dm_table_get_num_targets(t); i++) {
+- ti = dm_table_get_target(t, i);
+-
+- if (ti->type->iterate_devices &&
+- ti->type->iterate_devices(ti,
+- device_dax_write_cache_enabled, NULL))
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+- sector_t start, sector_t len, void *data)
++static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
+ {
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+- return q && blk_queue_nonrot(q);
++ return q && !blk_queue_nonrot(q);
+ }
+
+ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+@@ -1611,23 +1617,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+ return q && !blk_queue_add_random(q);
+ }
+
+-static bool dm_table_all_devices_attribute(struct dm_table *t,
+- iterate_devices_callout_fn func)
+-{
+- struct dm_target *ti;
+- unsigned i;
+-
+- for (i = 0; i < dm_table_get_num_targets(t); i++) {
+- ti = dm_table_get_target(t, i);
+-
+- if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, func, NULL))
+- return false;
+- }
+-
+- return true;
+-}
+-
+ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+ {
+@@ -1779,27 +1768,6 @@ static int device_requires_stable_pages(struct dm_target *ti,
+ return q && blk_queue_stable_writes(q);
+ }
+
+-/*
+- * If any underlying device requires stable pages, a table must require
+- * them as well. Only targets that support iterate_devices are considered:
+- * don't want error, zero, etc to require stable pages.
+- */
+-static bool dm_table_requires_stable_pages(struct dm_table *t)
+-{
+- struct dm_target *ti;
+- unsigned i;
+-
+- for (i = 0; i < dm_table_get_num_targets(t); i++) {
+- ti = dm_table_get_target(t, i);
+-
+- if (ti->type->iterate_devices &&
+- ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+- return true;
+- }
+-
+- return false;
+-}
+-
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
+ {
+@@ -1837,22 +1805,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ }
+ blk_queue_write_cache(q, wc, fua);
+
+- if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
++ if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
+ blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+- if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
++ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
+ set_dax_synchronous(t->md->dax_dev);
+ }
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
+- if (dm_table_supports_dax_write_cache(t))
++ if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
+ dax_write_cache(t->md->dax_dev, true);
+
+ /* Ensure that all underlying devices are non-rotational. */
+- if (dm_table_all_devices_attribute(t, device_is_nonrot))
+- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+- else
++ if (dm_table_any_dev_attr(t, device_is_rotational, NULL))
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
++ else
++ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+
+ if (!dm_table_supports_write_same(t))
+ q->limits.max_write_same_sectors = 0;
+@@ -1864,8 +1832,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ /*
+ * Some devices don't use blk_integrity but still want stable pages
+ * because they do their own checksumming.
++ * If any underlying device requires stable pages, a table must require
++ * them as well. Only targets that support iterate_devices are considered:
++ * don't want error, zero, etc. to require stable pages.
+ */
+- if (dm_table_requires_stable_pages(t))
++ if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL))
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
+@@ -1876,7 +1847,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+ * have it set.
+ */
+- if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
++ if (blk_queue_add_random(q) &&
++ dm_table_any_dev_attr(t, device_is_not_random, NULL))
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+
+ /*
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index d5223a0e5cc51..8628c4aa2e854 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -148,6 +148,7 @@ struct dm_writecache {
+ size_t metadata_sectors;
+ size_t n_blocks;
+ uint64_t seq_count;
++ sector_t data_device_sectors;
+ void *block_start;
+ struct wc_entry *entries;
+ unsigned block_size;
+@@ -159,14 +160,22 @@ struct dm_writecache {
+ bool overwrote_committed:1;
+ bool memory_vmapped:1;
+
++ bool start_sector_set:1;
+ bool high_wm_percent_set:1;
+ bool low_wm_percent_set:1;
+ bool max_writeback_jobs_set:1;
+ bool autocommit_blocks_set:1;
+ bool autocommit_time_set:1;
++ bool max_age_set:1;
+ bool writeback_fua_set:1;
+ bool flush_on_suspend:1;
+ bool cleaner:1;
++ bool cleaner_set:1;
++
++ unsigned high_wm_percent_value;
++ unsigned low_wm_percent_value;
++ unsigned autocommit_time_value;
++ unsigned max_age_value;
+
+ unsigned writeback_all;
+ struct workqueue_struct *writeback_wq;
+@@ -523,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
+
+ region.bdev = wc->ssd_dev->bdev;
+ region.sector = 0;
+- region.count = PAGE_SIZE;
++ region.count = PAGE_SIZE >> SECTOR_SHIFT;
+
+ if (unlikely(region.sector + region.count > wc->metadata_sectors))
+ region.count = wc->metadata_sectors - region.sector;
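The unit bug here: dm_io region counts are measured in 512-byte sectors, but the old code stored PAGE_SIZE, a byte count, so the superblock write was far too large before the clamp. The conversion the fix applies, as a standalone check (the SECTOR_SHIFT/PAGE_SIZE values are assumed for illustration):

    #include <assert.h>
    #include <stddef.h>

    #define SECTOR_SHIFT 9        /* 512-byte sectors, as in the kernel */
    #define PAGE_SIZE    4096UL   /* assumed page size for this sketch */

    int main(void)
    {
        /* region.count is in sectors, not bytes: convert first. */
        size_t count_in_sectors = PAGE_SIZE >> SECTOR_SHIFT;
        assert(count_in_sectors == 8);   /* not 4096 */
        return 0;
    }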
+@@ -969,6 +978,8 @@ static void writecache_resume(struct dm_target *ti)
+
+ wc_lock(wc);
+
++ wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
++
+ if (WC_MODE_PMEM(wc)) {
+ persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+ } else {
+@@ -1638,6 +1649,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
+ void *address = memory_data(wc, e);
+
+ persistent_memory_flush_cache(address, block_size);
++
++ if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
++ return true;
++
+ return bio_add_page(&wb->bio, persistent_memory_page(address),
+ block_size, persistent_memory_page_offset(address)) != 0;
+ }
+@@ -1709,6 +1724,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
+ if (writecache_has_error(wc)) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
++ } else if (unlikely(!bio_sectors(bio))) {
++ bio->bi_status = BLK_STS_OK;
++ bio_endio(bio);
+ } else {
+ submit_bio(bio);
+ }
+@@ -1752,6 +1770,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
+ e = f;
+ }
+
++ if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
++ if (to.sector >= wc->data_device_sectors) {
++ writecache_copy_endio(0, 0, c);
++ continue;
++ }
++ from.count = to.count = wc->data_device_sectors - to.sector;
++ }
++
+ dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
+
+ __writeback_throttle(wc, wbl);
+@@ -2205,6 +2231,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+ goto invalid_optional;
+ wc->start_sector = start_sector;
++ wc->start_sector_set = true;
+ if (wc->start_sector != start_sector ||
+ wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+ goto invalid_optional;
+@@ -2214,6 +2241,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto invalid_optional;
+ if (high_wm_percent < 0 || high_wm_percent > 100)
+ goto invalid_optional;
++ wc->high_wm_percent_value = high_wm_percent;
+ wc->high_wm_percent_set = true;
+ } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
+ string = dm_shift_arg(&as), opt_params--;
+@@ -2221,6 +2249,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ goto invalid_optional;
+ if (low_wm_percent < 0 || low_wm_percent > 100)
+ goto invalid_optional;
++ wc->low_wm_percent_value = low_wm_percent;
+ wc->low_wm_percent_set = true;
+ } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
+ string = dm_shift_arg(&as), opt_params--;
+@@ -2240,6 +2269,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ if (autocommit_msecs > 3600000)
+ goto invalid_optional;
+ wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
++ wc->autocommit_time_value = autocommit_msecs;
+ wc->autocommit_time_set = true;
+ } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
+ unsigned max_age_msecs;
+@@ -2249,7 +2279,10 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ if (max_age_msecs > 86400000)
+ goto invalid_optional;
+ wc->max_age = msecs_to_jiffies(max_age_msecs);
++ wc->max_age_set = true;
++ wc->max_age_value = max_age_msecs;
+ } else if (!strcasecmp(string, "cleaner")) {
++ wc->cleaner_set = true;
+ wc->cleaner = true;
+ } else if (!strcasecmp(string, "fua")) {
+ if (WC_MODE_PMEM(wc)) {
+@@ -2455,7 +2488,6 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ struct dm_writecache *wc = ti->private;
+ unsigned extra_args;
+ unsigned sz = 0;
+- uint64_t x;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+@@ -2467,11 +2499,11 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
+ wc->dev->name, wc->ssd_dev->name, wc->block_size);
+ extra_args = 0;
+- if (wc->start_sector)
++ if (wc->start_sector_set)
+ extra_args += 2;
+- if (wc->high_wm_percent_set && !wc->cleaner)
++ if (wc->high_wm_percent_set)
+ extra_args += 2;
+- if (wc->low_wm_percent_set && !wc->cleaner)
++ if (wc->low_wm_percent_set)
+ extra_args += 2;
+ if (wc->max_writeback_jobs_set)
+ extra_args += 2;
+@@ -2479,37 +2511,29 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+ extra_args += 2;
+ if (wc->autocommit_time_set)
+ extra_args += 2;
+- if (wc->max_age != MAX_AGE_UNSPECIFIED)
++ if (wc->max_age_set)
+ extra_args += 2;
+- if (wc->cleaner)
++ if (wc->cleaner_set)
+ extra_args++;
+ if (wc->writeback_fua_set)
+ extra_args++;
+
+ DMEMIT("%u", extra_args);
+- if (wc->start_sector)
++ if (wc->start_sector_set)
+ DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
+- if (wc->high_wm_percent_set && !wc->cleaner) {
+- x = (uint64_t)wc->freelist_high_watermark * 100;
+- x += wc->n_blocks / 2;
+- do_div(x, (size_t)wc->n_blocks);
+- DMEMIT(" high_watermark %u", 100 - (unsigned)x);
+- }
+- if (wc->low_wm_percent_set && !wc->cleaner) {
+- x = (uint64_t)wc->freelist_low_watermark * 100;
+- x += wc->n_blocks / 2;
+- do_div(x, (size_t)wc->n_blocks);
+- DMEMIT(" low_watermark %u", 100 - (unsigned)x);
+- }
++ if (wc->high_wm_percent_set)
++ DMEMIT(" high_watermark %u", wc->high_wm_percent_value);
++ if (wc->low_wm_percent_set)
++ DMEMIT(" low_watermark %u", wc->low_wm_percent_value);
+ if (wc->max_writeback_jobs_set)
+ DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
+ if (wc->autocommit_blocks_set)
+ DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
+ if (wc->autocommit_time_set)
+- DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
+- if (wc->max_age != MAX_AGE_UNSPECIFIED)
+- DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
+- if (wc->cleaner)
++ DMEMIT(" autocommit_time %u", wc->autocommit_time_value);
++ if (wc->max_age_set)
++ DMEMIT(" max_age %u", wc->max_age_value);
++ if (wc->cleaner_set)
+ DMEMIT(" cleaner");
+ if (wc->writeback_fua_set)
+ DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
+@@ -2519,7 +2543,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
+
+ static struct target_type writecache_target = {
+ .name = "writecache",
+- .version = {1, 3, 0},
++ .version = {1, 4, 0},
+ .module = THIS_MODULE,
+ .ctr = writecache_ctr,
+ .dtr = writecache_dtr,
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 7bac564f3faa6..6f03adc128495 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -148,6 +148,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
+ #define DM_NUMA_NODE NUMA_NO_NODE
+ static int dm_numa_node = DM_NUMA_NODE;
+
++#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
++static int swap_bios = DEFAULT_SWAP_BIOS;
++static int get_swap_bios(void)
++{
++ int latch = READ_ONCE(swap_bios);
++ if (unlikely(latch <= 0))
++ latch = DEFAULT_SWAP_BIOS;
++ return latch;
++}
++
+ /*
+ * For mempools pre-allocation at the table loading time.
+ */
+@@ -969,6 +979,11 @@ void disable_write_zeroes(struct mapped_device *md)
+ limits->max_write_zeroes_sectors = 0;
+ }
+
++static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
++{
++ return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
++}
++
+ static void clone_endio(struct bio *bio)
+ {
+ blk_status_t error = bio->bi_status;
+@@ -1019,6 +1034,11 @@ static void clone_endio(struct bio *bio)
+ }
+ }
+
++ if (unlikely(swap_bios_limit(tio->ti, bio))) {
++ struct mapped_device *md = io->md;
++ up(&md->swap_bios_semaphore);
++ }
++
+ free_tio(tio);
+ dec_pending(io, error);
+ }
+@@ -1128,7 +1148,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
+ if (!map)
+ goto out;
+
+- ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
++ ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
+
+ out:
+ dm_put_live_table(md, srcu_idx);
+@@ -1252,6 +1272,22 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+ }
+ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
++static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
++{
++ mutex_lock(&md->swap_bios_lock);
++ while (latch < md->swap_bios) {
++ cond_resched();
++ down(&md->swap_bios_semaphore);
++ md->swap_bios--;
++ }
++ while (latch > md->swap_bios) {
++ cond_resched();
++ up(&md->swap_bios_semaphore);
++ md->swap_bios++;
++ }
++ mutex_unlock(&md->swap_bios_lock);
++}
++
+ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ {
+ int r;
+@@ -1271,6 +1307,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ atomic_inc(&io->io_count);
+ sector = clone->bi_iter.bi_sector;
+
++ if (unlikely(swap_bios_limit(ti, clone))) {
++ struct mapped_device *md = io->md;
++ int latch = get_swap_bios();
++ if (unlikely(latch != md->swap_bios))
++ __set_swap_bios_limit(md, latch);
++ down(&md->swap_bios_semaphore);
++ }
++
+ r = ti->type->map(ti, clone);
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+@@ -1281,10 +1325,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ ret = submit_bio_noacct(clone);
+ break;
+ case DM_MAPIO_KILL:
++ if (unlikely(swap_bios_limit(ti, clone))) {
++ struct mapped_device *md = io->md;
++ up(&md->swap_bios_semaphore);
++ }
+ free_tio(tio);
+ dec_pending(io, BLK_STS_IOERR);
+ break;
+ case DM_MAPIO_REQUEUE:
++ if (unlikely(swap_bios_limit(ti, clone))) {
++ struct mapped_device *md = io->md;
++ up(&md->swap_bios_semaphore);
++ }
+ free_tio(tio);
+ dec_pending(io, BLK_STS_DM_REQUEUE);
+ break;
+@@ -1747,6 +1799,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
++ mutex_destroy(&md->swap_bios_lock);
+
+ dm_mq_cleanup_mapped_device(md);
+ }
+@@ -1814,6 +1867,10 @@ static struct mapped_device *alloc_dev(int minor)
+ init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_holder.completion);
+
++ md->swap_bios = get_swap_bios();
++ sema_init(&md->swap_bios_semaphore, md->swap_bios);
++ mutex_init(&md->swap_bios_lock);
++
+ md->disk->major = _major;
+ md->disk->first_minor = minor;
+ md->disk->fops = &dm_blk_dops;
+@@ -3097,6 +3154,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+ module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+
++module_param(swap_bios, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
++
+ MODULE_DESCRIPTION(DM_NAME " driver");
+ MODULE_AUTHOR("Joe Thornber ");
+ MODULE_LICENSE("GPL");
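__set_swap_bios_limit() above resizes a counting semaphore at runtime: shrinking the limit absorbs tokens with down(), growing it releases tokens with up(), all under a mutex so only one resize runs at a time, while __map_bio()/clone_endio() do the per-bio down()/up(). A POSIX userspace sketch of the same resize idea (set_limit, cur_limit and friends are illustrative, not the kernel API):

    #include <pthread.h>
    #include <semaphore.h>

    static sem_t sem;        /* available swap-I/O slots */
    static int cur_limit;    /* analogue of md->swap_bios */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Adjust the semaphore so its token count tracks a new limit,
     * mirroring __set_swap_bios_limit(). */
    static void set_limit(int latch)
    {
        pthread_mutex_lock(&lock);
        while (latch < cur_limit) {   /* shrink: absorb tokens */
            sem_wait(&sem);
            cur_limit--;
        }
        while (latch > cur_limit) {   /* grow: release tokens */
            sem_post(&sem);
            cur_limit++;
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        cur_limit = 64;
        sem_init(&sem, 0, (unsigned int)cur_limit);
        set_limit(16);    /* tighten the in-flight swap limit */
        set_limit(128);   /* relax it again */
        sem_destroy(&sem);
        return 0;
    }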
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index fffe1e289c533..b441ad772c188 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -73,7 +73,7 @@ void dm_table_free_md_mempools(struct dm_table *t);
+ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+ bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
+ int *blocksize);
+-int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
++int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data);
+
+ void dm_lock_md_type(struct mapped_device *md);
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 2b9d81e4794a4..6eed3209ee2d3 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -1000,6 +1000,7 @@ config VIDEO_OV772X
+ tristate "OmniVision OV772x sensor support"
+ depends on I2C && VIDEO_V4L2
+ select REGMAP_SCCB
++ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV772x camera.
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index c82c1493e099d..b1e2476d3c9e6 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -580,7 +580,7 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
+
+ asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
+ source->fwnode,
+- sizeof(*asd));
++ sizeof(struct max9286_asd));
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev for source %u: %ld",
+ i, PTR_ERR(asd));
+diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
+index 148fd4e05029a..866c8c2e8f59a 100644
+--- a/drivers/media/i2c/ov5670.c
++++ b/drivers/media/i2c/ov5670.c
+@@ -2084,7 +2084,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
+
+ /* By default, V4L2_CID_PIXEL_RATE is read only */
+ ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
+- V4L2_CID_PIXEL_RATE, 0,
++ V4L2_CID_PIXEL_RATE,
++ link_freq_configs[0].pixel_rate,
+ link_freq_configs[0].pixel_rate,
+ 1,
+ link_freq_configs[0].pixel_rate);
+diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
+index 6f8ffab8840f4..07b6d0c49bbfa 100644
+--- a/drivers/media/pci/cx25821/cx25821-core.c
++++ b/drivers/media/pci/cx25821/cx25821-core.c
+@@ -976,8 +976,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci,
+ __le32 *cpu;
+ dma_addr_t dma = 0;
+
+- if (NULL != risc->cpu && risc->size < size)
++ if (risc->cpu && risc->size < size) {
+ pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
++ risc->cpu = NULL;
++ }
+ if (NULL == risc->cpu) {
+ cpu = pci_zalloc_consistent(pci, size, &dma);
+ if (NULL == cpu)
+diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
+index 82d7f17e6a024..7a805201034b7 100644
+--- a/drivers/media/pci/intel/ipu3/Kconfig
++++ b/drivers/media/pci/intel/ipu3/Kconfig
+@@ -2,7 +2,8 @@
+ config VIDEO_IPU3_CIO2
+ tristate "Intel ipu3-cio2 driver"
+ depends on VIDEO_V4L2 && PCI
+- depends on (X86 && ACPI) || COMPILE_TEST
++ depends on ACPI || COMPILE_TEST
++ depends on X86
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+index 6cada8a6e50cc..143ba9d90342f 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -1269,7 +1269,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
+ fmt->format.code = formats[0].mbus_code;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+- if (formats[i].mbus_code == fmt->format.code) {
++ if (formats[i].mbus_code == mbus_code) {
+ fmt->format.code = mbus_code;
+ break;
+ }
+diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
+index 39e3c7f8c5b46..76a37fbd84587 100644
+--- a/drivers/media/pci/saa7134/saa7134-empress.c
++++ b/drivers/media/pci/saa7134/saa7134-empress.c
+@@ -282,8 +282,11 @@ static int empress_init(struct saa7134_dev *dev)
+ q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
+ err = vb2_queue_init(q);
+- if (err)
++ if (err) {
++ video_device_release(dev->empress_dev);
++ dev->empress_dev = NULL;
+ return err;
++ }
+ dev->empress_dev->queue = q;
+ dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
+index e6b74e161a055..c0604d9c70119 100644
+--- a/drivers/media/pci/smipcie/smipcie-ir.c
++++ b/drivers/media/pci/smipcie/smipcie-ir.c
+@@ -60,38 +60,44 @@ static void smi_ir_decode(struct smi_rc *ir)
+ {
+ struct smi_dev *dev = ir->dev;
+ struct rc_dev *rc_dev = ir->rc_dev;
+- u32 dwIRControl, dwIRData;
+- u8 index, ucIRCount, readLoop;
++ u32 control, data;
++ u8 index, ir_count, read_loop;
+
+- dwIRControl = smi_read(IR_Init_Reg);
++ control = smi_read(IR_Init_Reg);
+
+- if (dwIRControl & rbIRVld) {
+- ucIRCount = (u8) smi_read(IR_Data_Cnt);
++ dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control);
+
+- readLoop = ucIRCount/4;
+- if (ucIRCount % 4)
+- readLoop += 1;
+- for (index = 0; index < readLoop; index++) {
+- dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++ if (control & rbIRVld) {
++ ir_count = (u8)smi_read(IR_Data_Cnt);
+
+- ir->irData[index*4 + 0] = (u8)(dwIRData);
+- ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
+- ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
+- ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
++ dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count);
++
++ read_loop = ir_count / 4;
++ if (ir_count % 4)
++ read_loop += 1;
++ for (index = 0; index < read_loop; index++) {
++ data = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++ dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data);
++
++ ir->irData[index * 4 + 0] = (u8)(data);
++ ir->irData[index * 4 + 1] = (u8)(data >> 8);
++ ir->irData[index * 4 + 2] = (u8)(data >> 16);
++ ir->irData[index * 4 + 3] = (u8)(data >> 24);
+ }
+- smi_raw_process(rc_dev, ir->irData, ucIRCount);
+- smi_set(IR_Init_Reg, rbIRVld);
++ smi_raw_process(rc_dev, ir->irData, ir_count);
+ }
+
+- if (dwIRControl & rbIRhighidle) {
++ if (control & rbIRhighidle) {
+ struct ir_raw_event rawir = {};
+
++ dev_dbg(&rc_dev->dev, "high idle\n");
++
+ rawir.pulse = 0;
+ rawir.duration = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
+ ir_raw_event_store_with_filter(rc_dev, &rawir);
+- smi_set(IR_Init_Reg, rbIRhighidle);
+ }
+
++ smi_set(IR_Init_Reg, rbIRVld);
+ ir_raw_event_handle(rc_dev);
+ }
+
+@@ -150,7 +156,7 @@ int smi_ir_init(struct smi_dev *dev)
+ rc_dev->dev.parent = &dev->pci_dev->dev;
+
+ rc_dev->map_name = dev->info->rc_map;
+- rc_dev->timeout = MS_TO_US(100);
++ rc_dev->timeout = SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN;
+ rc_dev->rx_resolution = SMI_SAMPLE_PERIOD;
+
+ ir->rc_dev = rc_dev;
+@@ -173,7 +179,7 @@ void smi_ir_exit(struct smi_dev *dev)
+ struct smi_rc *ir = &dev->ir;
+ struct rc_dev *rc_dev = ir->rc_dev;
+
+- smi_ir_stop(ir);
+ rc_unregister_device(rc_dev);
++ smi_ir_stop(ir);
+ ir->rc_dev = NULL;
+ }
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index c46a79eace98b..f2c4dadd6a0eb 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -1551,12 +1551,12 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444);
+
+- if (video->ctrl_handler.error) {
++ rc = video->ctrl_handler.error;
++ if (rc) {
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+
+- dev_err(video->dev, "Failed to init controls: %d\n",
+- video->ctrl_handler.error);
++ dev_err(video->dev, "Failed to init controls: %d\n", rc);
+ return rc;
+ }
+
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
+index c012fd2e1d291..34266fba824f2 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -931,6 +931,7 @@ static int mclk_enable(struct clk_hw *hw)
+ mclk_div = 2;
+ }
+
++ pm_runtime_get_sync(cam->dev);
+ clk_enable(cam->clk[0]);
+ mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
+ mcam_ctlr_power_up(cam);
+@@ -944,6 +945,7 @@ static void mclk_disable(struct clk_hw *hw)
+
+ mcam_ctlr_power_down(cam);
+ clk_disable(cam->clk[0]);
++ pm_runtime_put(cam->dev);
+ }
+
+ static unsigned long mclk_recalc_rate(struct clk_hw *hw,
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+index dfb42e19bf813..be3842e6ca475 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+@@ -303,7 +303,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
+ ret = PTR_ERR((__force void *)dev->reg_base[VENC_SYS]);
+ goto err_res;
+ }
+- mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[VENC_SYS]);
++ mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_SYS, dev->reg_base[VENC_SYS]);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+@@ -332,7 +332,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
+ ret = PTR_ERR((__force void *)dev->reg_base[VENC_LT_SYS]);
+ goto err_res;
+ }
+- mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[VENC_LT_SYS]);
++ mtk_v4l2_debug(2, "reg[%d] base=0x%p", VENC_LT_SYS, dev->reg_base[VENC_LT_SYS]);
+
+ dev->enc_lt_irq = platform_get_irq(pdev, 1);
+ irq_set_status_flags(dev->enc_lt_irq, IRQ_NOAUTOEN);
+diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+index 5ea153a685225..d9880210b2ab6 100644
+--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
++++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+@@ -890,7 +890,8 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
+
+ if (vsi->show_frame & BIT(2)) {
+- if (vpu_dec_start(&inst->vpu, NULL, 0)) {
++ ret = vpu_dec_start(&inst->vpu, NULL, 0);
++ if (ret) {
+ mtk_vcodec_err(inst, "vpu trig decoder failed");
+ goto DECODE_ERROR;
+ }
+diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
+index b664ce7558a1a..75fad9689c901 100644
+--- a/drivers/media/platform/pxa_camera.c
++++ b/drivers/media/platform/pxa_camera.c
+@@ -1386,6 +1386,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ int ret = 0;
++#ifdef DEBUG
++ int i;
++#endif
+
+ switch (pcdev->channels) {
+ case 1:
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index bd9334af1c734..97cea7c4d7697 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -579,7 +579,7 @@ static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+ break;
+ }
+
+- if (k < f->index)
++ if (k == -1 || k < f->index)
+ /*
+ * All the unique pixel formats matching the arguments
+ * have been enumerated (k >= 0 and f->index > 0), or
+@@ -961,6 +961,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+ video->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
++ ret = -EINVAL;
+ goto error_video_register;
+ }
+
+diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
+index 59a0266b1f399..2eef245c31a17 100644
+--- a/drivers/media/platform/ti-vpe/cal.c
++++ b/drivers/media/platform/ti-vpe/cal.c
+@@ -406,7 +406,7 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
+ */
+
+ struct cal_v4l2_async_subdev {
+- struct v4l2_async_subdev asd;
++ struct v4l2_async_subdev asd; /* Must be first */
+ struct cal_camerarx *phy;
+ };
+
+@@ -472,7 +472,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
+ fwnode = of_fwnode_handle(phy->sensor_node);
+ asd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
+ fwnode,
+- sizeof(*asd));
++ sizeof(*casd));
+ if (IS_ERR(asd)) {
+ phy_err(phy, "Failed to add subdev to notifier\n");
+ ret = PTR_ERR(asd);
+diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
+index dc62533cf32ce..aa66e4f5f3f34 100644
+--- a/drivers/media/platform/vsp1/vsp1_drv.c
++++ b/drivers/media/platform/vsp1/vsp1_drv.c
+@@ -882,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev)
+ }
+
+ done:
+- if (ret)
++ if (ret) {
+ pm_runtime_disable(&pdev->dev);
++ rcar_fcp_put(vsp1->fcp);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
+index e0242c9b6aeb1..3e729a17b35ff 100644
+--- a/drivers/media/rc/ir_toy.c
++++ b/drivers/media/rc/ir_toy.c
+@@ -491,6 +491,7 @@ static void irtoy_disconnect(struct usb_interface *intf)
+
+ static const struct usb_device_id irtoy_table[] = {
+ { USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xfd08, USB_CLASS_CDC_DATA) },
++ { USB_DEVICE_INTERFACE_CLASS(0x04d8, 0xf58b, USB_CLASS_CDC_DATA) },
+ { }
+ };
+
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index f1dbd059ed087..c8d63673e131d 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -1169,7 +1169,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
+ switch (subcmd) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+- if (buf_in[5] == 0)
++ if (buf_in[5] == 0 && *hi < 8)
+ ir->txports_cabled |= 1 << *hi;
+ break;
+
+diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.c b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+index 4511a2a98405d..1724bb485e670 100644
+--- a/drivers/media/test-drivers/vidtv/vidtv_psi.c
++++ b/drivers/media/test-drivers/vidtv/vidtv_psi.c
+@@ -1164,6 +1164,8 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
+ struct vidtv_psi_desc *table_descriptor = args->pmt->descriptor;
+ struct vidtv_psi_table_pmt_stream *stream = args->pmt->stream;
+ struct vidtv_psi_desc *stream_descriptor;
++ u32 crc = INITIAL_CRC;
++ u32 nbytes = 0;
+ struct header_write_args h_args = {
+ .dest_buf = args->buf,
+ .dest_offset = args->offset,
+@@ -1181,6 +1183,7 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
+ .new_psi_section = false,
+ .is_crc = false,
+ .dest_buf_sz = args->buf_sz,
++ .crc = &crc,
+ };
+ struct desc_write_args d_args = {
+ .dest_buf = args->buf,
+@@ -1193,8 +1196,6 @@ u32 vidtv_psi_pmt_write_into(struct vidtv_psi_pmt_write_args *args)
+ .pid = args->pid,
+ .dest_buf_sz = args->buf_sz,
+ };
+- u32 crc = INITIAL_CRC;
+- u32 nbytes = 0;
+
+ vidtv_psi_pmt_table_update_sec_len(args->pmt);
+
+diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
+index 0e26d22f0b268..53aa2558f71e1 100644
+--- a/drivers/media/tuners/qm1d1c0042.c
++++ b/drivers/media/tuners/qm1d1c0042.c
+@@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
+ if (val == reg_initval[reg_index][0x00])
+ break;
+ }
+- if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
++ if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
++ ret = -EINVAL;
+ goto failed;
++ }
+ memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
+ usleep_range(2000, 3000);
+
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 5a7a9522d46da..9ddda8d68ee0f 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -391,7 +391,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+- lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
++ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
+
+ usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+ info("INT Interrupt Service Started");
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index e6088b5d1b805..3daa64bb1e1d9 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -956,14 +956,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+
+ usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL);
+ if (!usb_bufs->buf[i]) {
+- em28xx_uninit_usb_xfer(dev, mode);
+-
+ for (i--; i >= 0; i--)
+ kfree(usb_bufs->buf[i]);
+
+- kfree(usb_bufs->buf);
+- usb_bufs->buf = NULL;
+-
++ em28xx_uninit_usb_xfer(dev, mode);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
+index 19c90fa9e443d..293a460f4616c 100644
+--- a/drivers/media/usb/tm6000/tm6000-dvb.c
++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
+@@ -141,6 +141,10 @@ static int tm6000_start_stream(struct tm6000_core *dev)
+ if (ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
+ ret, __func__);
++
++ kfree(dvb->bulk_urb->transfer_buffer);
++ usb_free_urb(dvb->bulk_urb);
++ dvb->bulk_urb = NULL;
+ return ret;
+ } else
+ printk(KERN_ERR "tm6000: pipe reset\n");
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index fa06bfa174ad3..c7172b8952a96 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -248,7 +248,9 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ goto done;
+
+ /* After the probe, update fmt with the values returned from
+- * negotiation with the device.
++ * negotiation with the device. Some devices return invalid bFormatIndex
++ * and bFrameIndex values, in which case we can only assume they have
++ * accepted the requested format as-is.
+ */
+ for (i = 0; i < stream->nformats; ++i) {
+ if (probe->bFormatIndex == stream->format[i].index) {
+@@ -257,11 +259,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ }
+ }
+
+- if (i == stream->nformats) {
+- uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
++ if (i == stream->nformats)
++ uvc_trace(UVC_TRACE_FORMAT,
++ "Unknown bFormatIndex %u, using default\n",
+ probe->bFormatIndex);
+- return -EINVAL;
+- }
+
+ for (i = 0; i < format->nframes; ++i) {
+ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
+@@ -270,11 +271,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ }
+ }
+
+- if (i == format->nframes) {
+- uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
++ if (i == format->nframes)
++ uvc_trace(UVC_TRACE_FORMAT,
++ "Unknown bFrameIndex %u, using default\n",
+ probe->bFrameIndex);
+- return -EINVAL;
+- }
+
+ fmt->fmt.pix.width = frame->wWidth;
+ fmt->fmt.pix.height = frame->wHeight;
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 3198abdd538ce..9906b41004e9b 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -3283,7 +3283,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ v4l2_kioctl func)
+ {
+ char sbuf[128];
+- void *mbuf = NULL;
++ void *mbuf = NULL, *array_buf = NULL;
+ void *parg = (void *)arg;
+ long err = -EINVAL;
+ bool has_array_args;
+@@ -3318,27 +3318,21 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ has_array_args = err;
+
+ if (has_array_args) {
+- /*
+- * When adding new types of array args, make sure that the
+- * parent argument to ioctl (which contains the pointer to the
+- * array) fits into sbuf (so that mbuf will still remain
+- * unused up to here).
+- */
+- mbuf = kvmalloc(array_size, GFP_KERNEL);
++ array_buf = kvmalloc(array_size, GFP_KERNEL);
+ err = -ENOMEM;
+- if (NULL == mbuf)
++ if (array_buf == NULL)
+ goto out_array_args;
+ err = -EFAULT;
+ if (in_compat_syscall())
+- err = v4l2_compat_get_array_args(file, mbuf, user_ptr,
+- array_size, orig_cmd,
+- parg);
++ err = v4l2_compat_get_array_args(file, array_buf,
++ user_ptr, array_size,
++ orig_cmd, parg);
+ else
+- err = copy_from_user(mbuf, user_ptr, array_size) ?
++ err = copy_from_user(array_buf, user_ptr, array_size) ?
+ -EFAULT : 0;
+ if (err)
+ goto out_array_args;
+- *kernel_ptr = mbuf;
++ *kernel_ptr = array_buf;
+ }
+
+ /* Handles IOCTL */
+@@ -3360,12 +3354,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ if (in_compat_syscall()) {
+ int put_err;
+
+- put_err = v4l2_compat_put_array_args(file, user_ptr, mbuf,
+- array_size, orig_cmd,
+- parg);
++ put_err = v4l2_compat_put_array_args(file, user_ptr,
++ array_buf,
++ array_size,
++ orig_cmd, parg);
+ if (put_err)
+ err = put_err;
+- } else if (copy_to_user(user_ptr, mbuf, array_size)) {
++ } else if (copy_to_user(user_ptr, array_buf, array_size)) {
+ err = -EFAULT;
+ }
+ goto out_array_args;
+@@ -3381,6 +3376,7 @@ out_array_args:
+ if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
+ err = -EFAULT;
+ out:
++ kvfree(array_buf);
+ kvfree(mbuf);
+ return err;
+ }
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index ac350f8d1e20f..82d09b88240e1 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -130,7 +130,7 @@ static void mtk_smi_clk_disable(const struct mtk_smi *smi)
+
+ int mtk_smi_larb_get(struct device *larbdev)
+ {
+- int ret = pm_runtime_get_sync(larbdev);
++ int ret = pm_runtime_resume_and_get(larbdev);
+
+ return (ret < 0) ? ret : 0;
+ }
+@@ -374,7 +374,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+ int ret;
+
+ /* Power on smi-common. */
+- ret = pm_runtime_get_sync(larb->smi_common_dev);
++ ret = pm_runtime_resume_and_get(larb->smi_common_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+ return ret;
+diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
+index 159a16f5e7d67..51d20c2ccb755 100644
+--- a/drivers/memory/ti-aemif.c
++++ b/drivers/memory/ti-aemif.c
+@@ -378,8 +378,10 @@ static int aemif_probe(struct platform_device *pdev)
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_aemif_parse_abus_config(pdev, child_np);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(child_np);
+ goto error;
++ }
+ }
+ } else if (pdata && pdata->num_abus_data > 0) {
+ for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
+@@ -405,8 +407,10 @@ static int aemif_probe(struct platform_device *pdev)
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_platform_populate(child_np, NULL,
+ dev_lookup, dev);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(child_np);
+ goto error;
++ }
+ }
+ } else if (pdata) {
+ for (i = 0; i < pdata->num_sub_devices; i++) {
+diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
+index 193a96c8b1eab..20cb294c75122 100644
+--- a/drivers/mfd/altera-sysmgr.c
++++ b/drivers/mfd/altera-sysmgr.c
+@@ -145,7 +145,8 @@ static int sysmgr_probe(struct platform_device *pdev)
+ sysmgr_config.reg_write = s10_protected_reg_write;
+
+ /* Need physical address for SMCC call */
+- regmap = devm_regmap_init(dev, NULL, (void *)res->start,
++ regmap = devm_regmap_init(dev, NULL,
++ (void *)(uintptr_t)res->start,
+ &sysmgr_config);
+ } else {
+ base = devm_ioremap(dev, res->start, resource_size(res));
+diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
+index fab3cdc27ed64..19d57a45134c6 100644
+--- a/drivers/mfd/bd9571mwv.c
++++ b/drivers/mfd/bd9571mwv.c
+@@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client,
+ return ret;
+ }
+
+- ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells,
+- ARRAY_SIZE(bd9571mwv_cells), NULL, 0,
+- regmap_irq_get_domain(bd->irq_data));
++ ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO,
++ bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells),
++ NULL, 0, regmap_irq_get_domain(bd->irq_data));
+ if (ret) {
+ regmap_del_irq_chip(bd->irq, bd->irq_data);
+ return ret;
+diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
+index 576da62fbb0ce..d87876747b913 100644
+--- a/drivers/mfd/gateworks-gsc.c
++++ b/drivers/mfd/gateworks-gsc.c
+@@ -234,7 +234,7 @@ static int gsc_probe(struct i2c_client *client)
+
+ ret = devm_regmap_add_irq_chip(dev, gsc->regmap, client->irq,
+ IRQF_ONESHOT | IRQF_SHARED |
+- IRQF_TRIGGER_FALLING, 0,
++ IRQF_TRIGGER_LOW, 0,
+ &gsc_irq_chip, &irq_data);
+ if (ret)
+ return ret;
+diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
+index 8a7cc0f86958b..65b98f3fbd929 100644
+--- a/drivers/mfd/wm831x-auxadc.c
++++ b/drivers/mfd/wm831x-auxadc.c
+@@ -93,11 +93,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
+ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
+
+ mutex_lock(&wm831x->auxadc_lock);
+-
+- list_del(&req->list);
+ ret = req->val;
+
+ out:
++ list_del(&req->list);
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ kfree(req);
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index 8859011672cb9..8200af22b529e 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -398,6 +398,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+ rts5227_extra_init_hw(pcr);
+
++ /* Power down OCP to reduce power consumption */
++ if (!pcr->card_exist)
++ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
++ OC_POWER_DOWN);
++
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
+ FUNC_FORCE_UPME_XMT_DBG);
+ rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
+diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
+index 7c45f82b43027..d92c4d2c521a3 100644
+--- a/drivers/misc/eeprom/eeprom_93xx46.c
++++ b/drivers/misc/eeprom/eeprom_93xx46.c
+@@ -512,3 +512,4 @@ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
+ MODULE_AUTHOR("Anatolij Gustschin ");
+ MODULE_ALIAS("spi:93xx46");
++MODULE_ALIAS("spi:eeprom-93xx46");
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 70eb5ed942d03..f12e909034ac0 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -520,12 +520,13 @@ fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
+ {
+ struct fastrpc_dma_buf_attachment *a = attachment->priv;
+ struct sg_table *table;
++ int ret;
+
+ table = &a->sgt;
+
+- if (!dma_map_sgtable(attachment->dev, table, dir, 0))
+- return ERR_PTR(-ENOMEM);
+-
++ ret = dma_map_sgtable(attachment->dev, table, dir, 0);
++ if (ret)
++ table = ERR_PTR(ret);
+ return table;
+ }
+
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 2907db260fba5..bf0407e8905c4 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -60,6 +60,13 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag,
+ goto out;
+ }
+
++ if (vtag) {
++ /* Check if vtag is supported by client */
++ rets = mei_cl_vt_support_check(cl);
++ if (rets)
++ goto out;
++ }
++
+ if (length > mei_cl_mtu(cl)) {
+ rets = -EFBIG;
+ goto out;
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index 686e8b6a4c55e..0cba3c6dfb148 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -1373,7 +1373,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+ return -EPROTO;
+ }
+
+- dev->dev_state = MEI_DEV_POWER_DOWN;
++ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ dev_info(dev->dev, "hbm: stop response: resetting.\n");
+ /* force the reset */
+ return -EPROTO;
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 9cf8d8f60cfef..14be76d4c2e61 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -101,6 +101,11 @@
+ #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
+ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+
++#define MEI_DEV_ID_EBG 0x1BE0 /* Emmitsburg WS */
++
++#define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */
++#define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */
++
+ /*
+ * MEI HW Section
+ */
+diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
+index 326955b04fda9..2161c1234ad72 100644
+--- a/drivers/misc/mei/interrupt.c
++++ b/drivers/misc/mei/interrupt.c
+@@ -295,12 +295,17 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
+ static inline int hdr_is_valid(u32 msg_hdr)
+ {
+ struct mei_msg_hdr *mei_hdr;
++ u32 expected_len = 0;
+
+ mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
+ if (!msg_hdr || mei_hdr->reserved)
+ return -EBADMSG;
+
+- if (mei_hdr->dma_ring && mei_hdr->length != MEI_SLOT_SIZE)
++ if (mei_hdr->dma_ring)
++ expected_len += MEI_SLOT_SIZE;
++ if (mei_hdr->extended)
++ expected_len += MEI_SLOT_SIZE;
++ if (mei_hdr->length < expected_len)
+ return -EBADMSG;
+
+ return 0;
+@@ -324,6 +329,8 @@ int mei_irq_read_handler(struct mei_device *dev,
+ struct mei_cl *cl;
+ int ret;
+ u32 ext_meta_hdr_u32;
++ u32 hdr_size_left;
++ u32 hdr_size_ext;
+ int i;
+ int ext_hdr_end;
+
+@@ -353,6 +360,7 @@ int mei_irq_read_handler(struct mei_device *dev,
+ }
+
+ ext_hdr_end = 1;
++ hdr_size_left = mei_hdr->length;
+
+ if (mei_hdr->extended) {
+ if (!dev->rd_msg_hdr[1]) {
+@@ -363,8 +371,21 @@ int mei_irq_read_handler(struct mei_device *dev,
+ dev_dbg(dev->dev, "extended header is %08x\n",
+ ext_meta_hdr_u32);
+ }
+- meta_hdr = ((struct mei_ext_meta_hdr *)
+- dev->rd_msg_hdr + 1);
++ meta_hdr = ((struct mei_ext_meta_hdr *)dev->rd_msg_hdr + 1);
++ if (check_add_overflow((u32)sizeof(*meta_hdr),
++ mei_slots2data(meta_hdr->size),
++ &hdr_size_ext)) {
++ dev_err(dev->dev, "extended message size too big %d\n",
++ meta_hdr->size);
++ return -EBADMSG;
++ }
++ if (hdr_size_left < hdr_size_ext) {
++ dev_err(dev->dev, "corrupted message header len %d\n",
++ mei_hdr->length);
++ return -EBADMSG;
++ }
++ hdr_size_left -= hdr_size_ext;
++
+ ext_hdr_end = meta_hdr->size + 2;
+ for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
+ dev->rd_msg_hdr[i] = mei_read_hdr(dev);
+@@ -376,6 +397,12 @@ int mei_irq_read_handler(struct mei_device *dev,
+ }
+
+ if (mei_hdr->dma_ring) {
++ if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
++ dev_err(dev->dev, "corrupted message header len %d\n",
++ mei_hdr->length);
++ return -EBADMSG;
++ }
++
+ dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
+ dev->rd_msg_hdr_count++;
+ (*slots)--;
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 1de9ef7a272ba..a7e179626b635 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -107,6 +107,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_EBG, MEI_ME_PCH15_SPS_CFG)},
++
++ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
++
+ /* required last entry */
+ {0, }
+ };
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index c49065887e8f5..c2338750313c4 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -537,6 +537,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
+
+ queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
+
++ if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
++ return NULL;
++
+ queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+ if (queue) {
+ queue->q_header = NULL;
+@@ -630,7 +633,7 @@ static void qp_release_pages(struct page **pages,
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+- set_page_dirty(pages[i]);
++ set_page_dirty_lock(pages[i]);
+
+ put_page(pages[i]);
+ pages[i] = NULL;
+diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
+index 53b81582f1afe..5490962dc8e53 100644
+--- a/drivers/mmc/host/owl-mmc.c
++++ b/drivers/mmc/host/owl-mmc.c
+@@ -640,7 +640,7 @@ static int owl_mmc_probe(struct platform_device *pdev)
+ owl_host->irq = platform_get_irq(pdev, 0);
+ if (owl_host->irq < 0) {
+ ret = -EINVAL;
+- goto err_free_host;
++ goto err_release_channel;
+ }
+
+ ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
+@@ -648,19 +648,21 @@ static int owl_mmc_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n",
+ owl_host->irq);
+- goto err_free_host;
++ goto err_release_channel;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add host\n");
+- goto err_free_host;
++ goto err_release_channel;
+ }
+
+ dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
+
+ return 0;
+
++err_release_channel:
++ dma_release_channel(owl_host->dma);
+ err_free_host:
+ mmc_free_host(mmc);
+
+@@ -674,6 +676,7 @@ static int owl_mmc_remove(struct platform_device *pdev)
+
+ mmc_remove_host(mmc);
+ disable_irq(owl_host->irq);
++ dma_release_channel(owl_host->dma);
+ mmc_free_host(mmc);
+
+ return 0;
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index fe13e1ea22dcc..f3e76d6b3e3fe 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -186,8 +186,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
+ mmc_get_dma_dir(data)))
+ goto force_pio;
+
+- /* This DMAC cannot handle if buffer is not 8-bytes alignment */
+- if (!IS_ALIGNED(sg_dma_address(sg), 8))
++ /* This DMAC cannot handle buffers that are not 128-byte aligned */
++ if (!IS_ALIGNED(sg_dma_address(sg), 128))
+ goto force_pio_with_unmap;
+
+ if (data->flags & MMC_DATA_READ) {
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 16ed19f479392..a20459744d213 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1666,9 +1666,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
++ int dead;
+
+ pm_runtime_get_sync(&pdev->dev);
++ dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index fa76748d89293..94e3f72f6405d 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -33,6 +33,8 @@
+ #define O2_SD_ADMA2 0xE7
+ #define O2_SD_INF_MOD 0xF1
+ #define O2_SD_MISC_CTRL4 0xFC
++#define O2_SD_MISC_CTRL 0x1C0
++#define O2_SD_PWR_FORCE_L0 0x0002
+ #define O2_SD_TUNING_CTRL 0x300
+ #define O2_SD_PLL_SETTING 0x304
+ #define O2_SD_MISC_SETTING 0x308
+@@ -300,6 +302,8 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+ struct sdhci_host *host = mmc_priv(mmc);
+ int current_bus_width = 0;
++ u32 scratch32 = 0;
++ u16 scratch = 0;
+
+ /*
+ * This handler only implements the eMMC tuning that is specific to
+@@ -312,6 +316,17 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+ (opcode != MMC_SEND_TUNING_BLOCK)))
+ return -EINVAL;
++
++ /* Force power mode to enter L0 */
++ scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
++ scratch |= O2_SD_PWR_FORCE_L0;
++ sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
++
++ /* wait for DLL lock, timeout value 5ms */
++ if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
++ scratch32, (scratch32 & O2_DLL_LOCK_STATUS), 1, 5000))
++ pr_warn("%s: DLL can't lock in 5ms after force L0 during tuning.\n",
++ mmc_hostname(host->mmc));
+ /*
+ * Judge the tuning reason, whether caused by dll shift
+ * If cause by dll shift, should call sdhci_o2_dll_recovery
+@@ -344,6 +359,11 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ sdhci_set_bus_width(host, current_bus_width);
+ }
+
++ /* Cancel forcing power mode to enter L0 */
++ scratch = sdhci_readw(host, O2_SD_MISC_CTRL);
++ scratch &= ~(O2_SD_PWR_FORCE_L0);
++ sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
++
+ sdhci_reset(host, SDHCI_RESET_CMD);
+ sdhci_reset(host, SDHCI_RESET_DATA);
+
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index f85171edabeb9..5dc36efff47ff 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -708,14 +708,14 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+- struct mmc_host *mmc = host->mmc;
+
+- mmc_remove_host(mmc);
++ sdhci_remove_host(host, 0);
++
+ clk_disable_unprepare(sprd_host->clk_sdio);
+ clk_disable_unprepare(sprd_host->clk_enable);
+ clk_disable_unprepare(sprd_host->clk_2x_enable);
+
+- mmc_free_host(mmc);
++ sdhci_pltfm_free(pdev);
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index e2d5112d809dc..615f3d008af1e 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1858,10 +1858,12 @@ static int usdhi6_probe(struct platform_device *pdev)
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+- goto e_clk_off;
++ goto e_release_dma;
+
+ return 0;
+
++e_release_dma:
++ usdhi6_dma_release(host);
+ e_clk_off:
+ clk_disable_unprepare(host->clk);
+ e_free_mmc:
+diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
+index cfd170946ba48..5b04ae6c30573 100644
+--- a/drivers/mtd/devices/phram.c
++++ b/drivers/mtd/devices/phram.c
+@@ -222,6 +222,7 @@ static int phram_setup(const char *val)
+ uint64_t start;
+ uint64_t len;
+ uint64_t erasesize = PAGE_SIZE;
++ uint32_t rem;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf))
+@@ -263,8 +264,11 @@ static int phram_setup(const char *val)
+ }
+ }
+
++ if (erasesize)
++ div_u64_rem(len, (uint32_t)erasesize, &rem);
++
+ if (len == 0 || erasesize == 0 || erasesize > len
+- || erasesize > UINT_MAX || do_div(len, (uint32_t)erasesize) != 0) {
++ || erasesize > UINT_MAX || rem) {
+ parse_err("illegal erasesize or len\n");
+ goto error;
+ }
+diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
+index a304fda5d1fa5..8b49fd56cf964 100644
+--- a/drivers/mtd/nand/raw/intel-nand-controller.c
++++ b/drivers/mtd/nand/raw/intel-nand-controller.c
+@@ -318,8 +318,10 @@ static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir,
+ }
+
+ tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags);
+- if (!tx)
+- return -ENXIO;
++ if (!tx) {
++ ret = -ENXIO;
++ goto err_unmap;
++ }
+
+ tx->callback = callback;
+ tx->callback_param = ebu_host;
+diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
+index 980e332bdac48..26116694c821b 100644
+--- a/drivers/mtd/parsers/afs.c
++++ b/drivers/mtd/parsers/afs.c
+@@ -370,10 +370,8 @@ static int parse_afs_partitions(struct mtd_info *mtd,
+ return i;
+
+ out_free_parts:
+- while (i >= 0) {
++ while (--i >= 0)
+ kfree(parts[i].name);
+- i--;
+- }
+ kfree(parts);
+ *pparts = NULL;
+ return ret;
+diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
+index d69607b482272..fab0949aabba1 100644
+--- a/drivers/mtd/parsers/parser_imagetag.c
++++ b/drivers/mtd/parsers/parser_imagetag.c
+@@ -83,6 +83,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -92,6 +93,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -100,6 +102,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -108,6 +111,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
++ ret = -EINVAL;
+ goto out;
+ }
+
+diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+index 7c26f8f565cba..47fbf1d1e5573 100644
+--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
++++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+@@ -399,8 +399,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host)
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = hisi_spi_nor_register(np, host);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ goto fail;
++ }
+
+ if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 20df44b753dab..b17faccc95c43 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -1364,14 +1364,15 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+
+ erase = &map->erase_type[i];
+
++ /* Alignment is not mandatory for overlaid regions */
++ if (region->offset & SNOR_OVERLAID_REGION &&
++ region->size <= len)
++ return erase;
++
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+- /* Alignment is not mandatory for overlaid regions */
+- if (region->offset & SNOR_OVERLAID_REGION)
+- return erase;
+-
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (rem)
+ continue;
+@@ -1515,6 +1516,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
++ erase->size != cmd->size ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
+index 6ee7719e59037..25142ec4737b7 100644
+--- a/drivers/mtd/spi-nor/sfdp.c
++++ b/drivers/mtd/spi-nor/sfdp.c
+@@ -788,7 +788,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+- if (!(erase_type & BIT(i)))
++ if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+@@ -858,6 +858,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
++ spi_nor_region_mark_end(®ion[i - 1]);
+
+ save_uniform_erase_type = map->uniform_erase_type;
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+@@ -881,8 +882,6 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
+- spi_nor_region_mark_end(®ion[i - 1]);
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index 260f9f46668b8..63339d29be905 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -87,7 +87,7 @@ config WIREGUARD
+ select CRYPTO_CURVE25519_X86 if X86 && 64BIT
+ select ARM_CRYPTO if ARM
+ select ARM64_CRYPTO if ARM64
+- select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
++ select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
+ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_POLY1305_ARM if ARM
+ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index f07e8b737d31e..ee39e79927efb 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -2901,7 +2901,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ spi_get_device_id(spi)->driver_data;
+
+ /* Errata Reference:
+- * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
++ * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
+ *
+ * The SPI can write corrupted data to the RAM at fast SPI
+ * speeds:
+diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
+index 45fdb1256dbfe..0f1ee4a4fa55a 100644
+--- a/drivers/net/dsa/ocelot/felix.c
++++ b/drivers/net/dsa/ocelot/felix.c
+@@ -654,14 +654,18 @@ static void felix_teardown(struct dsa_switch *ds)
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+- if (felix->info->mdio_bus_free)
+- felix->info->mdio_bus_free(ocelot);
+-
+- for (port = 0; port < ocelot->num_phys_ports; port++)
+- ocelot_deinit_port(ocelot, port);
+ ocelot_deinit_timestamp(ocelot);
+- /* stop workqueue thread */
+ ocelot_deinit(ocelot);
++
++ for (port = 0; port < ocelot->num_phys_ports; port++) {
++ if (dsa_is_unused_port(ds, port))
++ continue;
++
++ ocelot_deinit_port(ocelot, port);
++ }
++
++ if (felix->info->mdio_bus_free)
++ felix->info->mdio_bus_free(ocelot);
+ }
+
+ static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index b40d4377cc71d..b2cd3bdba9f89 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1279,10 +1279,18 @@
+ #define MDIO_PMA_10GBR_FECCTRL 0x00ab
+ #endif
+
++#ifndef MDIO_PMA_RX_CTRL1
++#define MDIO_PMA_RX_CTRL1 0x8051
++#endif
++
+ #ifndef MDIO_PCS_DIG_CTRL
+ #define MDIO_PCS_DIG_CTRL 0x8000
+ #endif
+
++#ifndef MDIO_PCS_DIGITAL_STAT
++#define MDIO_PCS_DIGITAL_STAT 0x8010
++#endif
++
+ #ifndef MDIO_AN_XNP
+ #define MDIO_AN_XNP 0x0016
+ #endif
+@@ -1358,6 +1366,8 @@
+ #define XGBE_KR_TRAINING_ENABLE BIT(1)
+
+ #define XGBE_PCS_CL37_BP BIT(12)
++#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
++#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
+
+ #define XGBE_AN_CL37_INT_CMPLT BIT(0)
+ #define XGBE_AN_CL37_INT_MASK 0x01
+@@ -1375,6 +1385,10 @@
+ #define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
+ #define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+
++#define XGBE_PMA_RX_RST_0_MASK BIT(4)
++#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
++#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
++
+ /* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 2709a2db56577..395eb0b526802 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1368,6 +1368,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ return;
+
+ netif_tx_stop_all_queues(netdev);
++ netif_carrier_off(pdata->netdev);
+
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 93ef5a30cb8d9..4e97b48695220 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ &an_restart);
+ if (an_restart) {
+ xgbe_phy_config_aneg(pdata);
+- return;
++ goto adjust_link;
+ }
+
+ if (pdata->phy.link) {
+@@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ pdata->phy.link = 0;
+- netif_carrier_off(pdata->netdev);
+
+ xgbe_phy_adjust_link(pdata);
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 859ded0c06b05..18e48b3bc402b 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -922,6 +922,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ if ((phy_id & 0xfffffff0) != 0x03625d10)
+ return false;
+
++ /* Reset PHY - wait for self-clearing reset bit to clear */
++ genphy_soft_reset(phy_data->phydev);
++
+ /* Disable RGMII mode */
+ phy_write(phy_data->phydev, 0x18, 0x7007);
+ reg = phy_read(phy_data->phydev, 0x18);
+@@ -1953,6 +1956,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
+ xgbe_phy_put_comm_ownership(pdata);
+ }
+
++static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
++{
++ int reg;
++
++ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
++ XGBE_PCS_PSEQ_STATE_MASK);
++ if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
++ /* Mailbox command timed out, reset of RX block is required.
++ * This can be done by asseting the reset bit and wait for
++ * its compeletion.
++ */
++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
++ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
++ ndelay(20);
++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
++ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
++ usleep_range(40, 50);
++ netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
++ }
++}
++
+ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int cmd, unsigned int sub_cmd)
+ {
+@@ -1960,9 +1984,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int wait;
+
+ /* Log if a previous command did not complete */
+- if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox not ready for command\n");
++ xgbe_phy_rx_reset(pdata);
++ }
+
+ /* Construct the command */
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+@@ -1984,6 +2010,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox command did not complete\n");
++
++ /* Reset on error */
++ xgbe_phy_rx_reset(pdata);
+ }
+
+ static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
+@@ -2584,6 +2613,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
++ if (pdata->phy.autoneg == AUTONEG_ENABLE &&
++ phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
++ if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
++ netif_carrier_off(pdata->netdev);
++ *an_restart = 1;
++ }
++ }
++
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ phy_data->rrc_count = 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index d10e4f85dd11a..1c96b7ba24f28 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8856,9 +8856,10 @@ void bnxt_tx_disable(struct bnxt *bp)
+ txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ }
+ }
++ /* Drop carrier first to prevent TX timeout */
++ netif_carrier_off(bp->dev);
+ /* Stop all TX queues */
+ netif_tx_disable(bp->dev);
+- netif_carrier_off(bp->dev);
+ }
+
+ void bnxt_tx_enable(struct bnxt *bp)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 6b7b69ed62db0..a9bcf887d2fbe 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -472,8 +472,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
+ u32 ver = nvm_cfg_ver.vu32;
+
+- sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
+- ver & 0xF);
++ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
++ ver & 0xf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ buf);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+index 1b49f2fa9b185..34546f5312eee 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+@@ -46,6 +46,9 @@
+ #define MAX_ULD_QSETS 16
+ #define MAX_ULD_NPORTS 4
+
++/* ulp_mem_io + ulptx_idata + payload + padding */
++#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
++
+ /* CPL message priority levels */
+ enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 196652a114c5f..3334c9e2152ab 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2842,17 +2842,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+- * data. We currently use the same limit as for Ethernet packets.
++ * data.
++ * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to an 8-bit field.
++ * However, FW_ULPTX_WR commands have a 256-byte immediate-only
++ * payload limit.
+ */
+ static inline int is_ofld_imm(const struct sk_buff *skb)
+ {
+ struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
+ unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
+
+- if (opcode == FW_CRYPTO_LOOKASIDE_WR)
++ if (unlikely(opcode == FW_ULPTX_WR))
++ return skb->len <= MAX_IMM_ULPTX_WR_LEN;
++ else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ return skb->len <= SGE_MAX_WR_LEN;
+ else
+- return skb->len <= MAX_IMM_TX_PKT_LEN;
++ return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+index 47ba81e42f5d0..b1161bdeda4dc 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
++++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+@@ -50,9 +50,6 @@
+ #define MIN_RCV_WND (24 * 1024U)
+ #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+
+-/* ulp_mem_io + ulptx_idata + payload + padding */
+-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+-
+ /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+ #define TX_HEADER_LEN \
+ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 6faa20bed4885..9905caeaeee3e 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2672,7 +2672,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+ u32 hash;
+ u64 ns;
+
+- np = container_of(&portal, struct dpaa_napi_portal, p);
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
+ fd_status = be32_to_cpu(fd->status);
+ fd_format = qm_fd_get_format(fd);
+@@ -2687,6 +2686,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
++ np = &percpu_priv->np;
+
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
+ return qman_cb_dqrr_stop;
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index fb0bcd18ec0c1..f1c2b3c7f7e99 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -399,10 +399,20 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
+
+ err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+- if (unlikely(err))
++ if (unlikely(err)) {
++ addr = dma_map_page(priv->net_dev->dev.parent,
++ virt_to_page(vaddr), 0,
++ priv->rx_buf_size, DMA_BIDIRECTIONAL);
++ if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
++ free_pages((unsigned long)vaddr, 0);
++ } else {
++ ch->buf_count++;
++ dpaa2_eth_xdp_release_buf(priv, ch, addr);
++ }
+ ch->stats.xdp_drop++;
+- else
++ } else {
+ ch->stats.xdp_redirect++;
++ }
+ break;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index 3eb5f1375bd4c..515c5b29d7aab 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -1157,14 +1157,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
+- enetc_phylink_destroy(priv);
+- enetc_mdiobus_destroy(pf);
+
+ if (pf->num_vfs)
+ enetc_sriov_configure(pdev, 0);
+
+ unregister_netdev(si->ndev);
+
++ enetc_phylink_destroy(priv);
++ enetc_mdiobus_destroy(pf);
++
+ enetc_free_msix(priv);
+
+ enetc_free_si_resources(priv);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index a536fdbf05e19..13ae7eee7ef5f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -247,8 +247,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ if (!ltb->buff)
+ return;
+
++ /* VIOS automatically unmaps the long term buffer at the
++ * remote end for the following resets:
++ * FAILOVER, MOBILITY, TIMEOUT.
++ */
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+- adapter->reset_reason != VNIC_RESET_MOBILITY)
++ adapter->reset_reason != VNIC_RESET_MOBILITY &&
++ adapter->reset_reason != VNIC_RESET_TIMEOUT)
+ send_request_unmap(adapter, ltb->map_id);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ }
+@@ -1353,10 +1358,8 @@ static int __ibmvnic_close(struct net_device *netdev)
+
+ adapter->state = VNIC_CLOSING;
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+- if (rc)
+- return rc;
+ adapter->state = VNIC_CLOSED;
+- return 0;
++ return rc;
+ }
+
+ static int ibmvnic_close(struct net_device *netdev)
+@@ -1702,6 +1705,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ skb_copy_from_linear_data(skb, dst, skb->len);
+ }
+
++ /* post changes to long_term_buff *dst before VIOS accesses it */
++ dma_wmb();
++
+ tx_pool->consumer_index =
+ (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
+
+@@ -2389,6 +2395,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ unsigned long flags;
+ int ret;
+
++ spin_lock_irqsave(&adapter->rwi_lock, flags);
++
+ /*
+ * If failover is pending don't schedule any other reset.
+ * Instead let the failover complete. If there is already a
+@@ -2409,14 +2417,11 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ goto err;
+ }
+
+- spin_lock_irqsave(&adapter->rwi_lock, flags);
+-
+ list_for_each(entry, &adapter->rwi_list) {
+ tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ if (tmp->reset_reason == reason) {
+ netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
+ reason);
+- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+ ret = EBUSY;
+ goto err;
+ }
+@@ -2424,8 +2429,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+
+ rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
+ if (!rwi) {
+- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+- ibmvnic_close(netdev);
+ ret = ENOMEM;
+ goto err;
+ }
+@@ -2438,12 +2441,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ }
+ rwi->reset_reason = reason;
+ list_add_tail(&rwi->list, &adapter->rwi_list);
+- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+ netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
+ schedule_work(&adapter->ibmvnic_reset);
+
+- return 0;
++ ret = 0;
+ err:
++ /* ibmvnic_close() below can block, so drop the lock first */
++ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
++
++ if (ret == ENOMEM)
++ ibmvnic_close(netdev);
++
+ return -ret;
+ }
+
+@@ -2541,6 +2549,8 @@ restart_poll:
+ offset = be16_to_cpu(next->rx_comp.off_frame_data);
+ flags = next->rx_comp.flags;
+ skb = rx_buff->skb;
++ /* load long_term_buff before copying to skb */
++ dma_rmb();
+ skb_copy_to_linear_data(skb, rx_buff->data + offset,
+ length);
+
+@@ -5459,7 +5469,18 @@ static int ibmvnic_remove(struct vio_dev *dev)
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->state_lock, flags);
++
++ /* If ibmvnic_reset() is scheduling a reset, wait for it to
++ * finish. Then, set the state to REMOVING to prevent it from
++ * scheduling any more work and to have reset functions ignore
++ * any resets that have already been scheduled. Drop the lock
++ * after setting state, so __ibmvnic_reset(), which is called
++ * from the flush_work() below, can make progress.
++ */
++ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ adapter->state = VNIC_REMOVING;
++ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
++
+ spin_unlock_irqrestore(&adapter->state_lock, flags);
+
+ flush_work(&adapter->ibmvnic_reset);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index c09c3f6bba9f2..72fea3b1c87d9 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -31,7 +31,7 @@
+ #define IBMVNIC_BUFFS_PER_POOL 100
+ #define IBMVNIC_MAX_QUEUES 16
+ #define IBMVNIC_MAX_QUEUE_SZ 4096
+-#define IBMVNIC_MAX_IND_DESCS 128
++#define IBMVNIC_MAX_IND_DESCS 16
+ #define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
+
+ #define IBMVNIC_TSO_BUF_SZ 65536
+@@ -1081,6 +1081,7 @@ struct ibmvnic_adapter {
+ struct tasklet_struct tasklet;
+ enum vnic_state state;
+ enum ibmvnic_reset_reason reset_reason;
++ /* when taking both state and rwi locks, take state lock first */
+ spinlock_t rwi_lock;
+ struct list_head rwi_list;
+ struct work_struct ibmvnic_reset;
+@@ -1097,6 +1098,8 @@ struct ibmvnic_adapter {
+ struct ibmvnic_tunables desired;
+ struct ibmvnic_tunables fallback;
+
+- /* Used for serializatin of state field */
++ /* Used for serialization of state field. When taking both state
++ * and rwi locks, take state lock first.
++ */
+ spinlock_t state_lock;
+ };
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 26ba1f3eb2d85..9e81f85ee2d8d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -4878,7 +4878,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ enum i40e_admin_queue_err adq_err;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+- bool is_reset_needed;
++ u32 reset_needed = 0;
+ i40e_status status;
+ u32 i, j;
+
+@@ -4923,9 +4923,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ flags_complete:
+ changed_flags = orig_flags ^ new_flags;
+
+- is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
+- I40E_FLAG_DISABLE_FW_LLDP));
++ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
++ reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
++ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
++ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
++ reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
+
+ /* Before we finalize any flag changes, we need to perform some
+ * checks to ensure that the changes are supported and safe.
+@@ -5057,7 +5059,7 @@ flags_complete:
+ case I40E_AQ_RC_EEXIST:
+ dev_warn(&pf->pdev->dev,
+ "FW LLDP agent is already running\n");
+- is_reset_needed = false;
++ reset_needed = 0;
+ break;
+ case I40E_AQ_RC_EPERM:
+ dev_warn(&pf->pdev->dev,
+@@ -5086,8 +5088,8 @@ flags_complete:
+ /* Issue reset to cause things to take effect, as additional bits
+ * are added we will need to create a mask of bits requiring reset
+ */
+- if (is_reset_needed)
+- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
++ if (reset_needed)
++ i40e_do_reset(pf, reset_needed, true);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 1db482d310c2d..fcd6f623f2fd8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2616,7 +2616,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ return;
+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
+ return;
+- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
++ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ return;
+ }
+@@ -2634,7 +2634,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ }
+ }
+ }
+- clear_bit(__I40E_VF_DISABLE, pf->state);
+ }
+
+ /**
+@@ -5921,7 +5920,7 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
+ ch->seid = ctxt.seid;
+ ch->vsi_number = ctxt.vsi_number;
+- ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
++ ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);
+
+ /* copy just the sections touched not the entire info
+ * since not all sections are valid as returned by
+@@ -7600,8 +7599,8 @@ static inline void
+ i40e_set_cld_element(struct i40e_cloud_filter *filter,
+ struct i40e_aqc_cloud_filters_element_data *cld)
+ {
+- int i, j;
+ u32 ipa;
++ int i;
+
+ memset(cld, 0, sizeof(*cld));
+ ether_addr_copy(cld->outer_mac, filter->dst_mac);
+@@ -7612,14 +7611,14 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter,
+
+ if (filter->n_proto == ETH_P_IPV6) {
+ #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
+- for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
+- i++, j += 2) {
++ for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
+ ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
+- ipa = cpu_to_le32(ipa);
+- memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
++
++ *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
+ }
+ } else {
+ ipa = be32_to_cpu(filter->dst_ipv4);
++
+ memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
+ }
+
+@@ -7667,6 +7666,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ if (filter->flags >= ARRAY_SIZE(flag_table))
+ return I40E_ERR_CONFIG;
+
++ memset(&cld_filter, 0, sizeof(cld_filter));
++
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter);
+
+@@ -7730,10 +7731,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ return -EOPNOTSUPP;
+
+ /* adding filter using src_port/src_ip is not supported at this stage */
+- if (filter->src_port || filter->src_ipv4 ||
++ if (filter->src_port ||
++ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ !ipv6_addr_any(&filter->ip.v6.src_ip6))
+ return -EOPNOTSUPP;
+
++ memset(&cld_filter, 0, sizeof(cld_filter));
++
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter.element);
+
+@@ -7757,7 +7761,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
+ }
+
+- } else if (filter->dst_ipv4 ||
++ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+@@ -8533,11 +8537,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
+ i40e_handle_reset_warning(pf, lock_acquired);
+
+- dev_info(&pf->pdev->dev,
+- pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+- "FW LLDP is disabled\n" :
+- "FW LLDP is enabled\n");
+-
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+@@ -8545,6 +8544,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
++ dev_info(&pf->pdev->dev,
++ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
++ "FW LLDP is disabled\n" :
++ "FW LLDP is enabled\n");
+
+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
+ int v;
+@@ -10001,7 +10004,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+- u8 set_fc_aq_fail = 0;
+ i40e_status ret;
+ u32 val;
+ int v;
+@@ -10127,13 +10129,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+- /* make sure our flow control settings are restored */
+- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+- if (ret)
+- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+- i40e_stat_str(&pf->hw, ret),
+- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+-
+ /* Rebuild the VSIs and VEBs that existed before reset.
+ * They are still in our local switch element arrays, so only
+ * need to rebuild the switch model in the HW.
+@@ -11709,6 +11704,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+ struct i40e_aqc_configure_partition_bw_data bw_data;
+ i40e_status status;
+
++ memset(&bw_data, 0, sizeof(bw_data));
++
+ /* Set the valid bit for this PF */
+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+@@ -14714,7 +14711,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ int err;
+ u32 val;
+ u32 i;
+- u8 set_fc_aq_fail;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+@@ -15048,24 +15044,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ }
+ INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+- /* Make sure flow control is set according to current settings */
+- err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+- dev_dbg(&pf->pdev->dev,
+- "Set fc with err %s aq_err %s on get_phy_cap\n",
+- i40e_stat_str(hw, err),
+- i40e_aq_str(hw, hw->aq.asq_last_status));
+- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+- dev_dbg(&pf->pdev->dev,
+- "Set fc with err %s aq_err %s on set_phy_config\n",
+- i40e_stat_str(hw, err),
+- i40e_aq_str(hw, hw->aq.asq_last_status));
+- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+- dev_dbg(&pf->pdev->dev,
+- "Set fc with err %s aq_err %s on get_link_info\n",
+- i40e_stat_str(hw, err),
+- i40e_aq_str(hw, hw->aq.asq_last_status));
+-
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 4aca637d4a23c..903d4e8cb0a11 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1793,7 +1793,7 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+
+ if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+- u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
++ __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(vlan_tag));
+@@ -3113,13 +3113,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+
+ l4_proto = ip.v4->protocol;
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
++ int ret;
++
+ tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+- if (l4.hdr != exthdr)
+- ipv6_skip_exthdr(skb, exthdr - skb->data,
+- &l4_proto, &frag_off);
++ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
++ &l4_proto, &frag_off);
++ if (ret < 0)
++ return -1;
+ }
+
+ /* define outer transport */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 492ce213208d2..37a21fb999221 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -444,7 +444,7 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
+ struct i40e_tx_desc *tx_desc;
+
+ tx_desc = I40E_TX_DESC(xdp_ring, ntu);
+- tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
++ tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index fa1e128c24eca..619d93f8b54c4 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -443,9 +443,7 @@ struct ice_pf {
+ struct ice_hw_port_stats stats_prev;
+ struct ice_hw hw;
+ u8 stat_prev_loaded:1; /* has previous stats been loaded */
+-#ifdef CONFIG_DCB
+ u16 dcbx_cap;
+-#endif /* CONFIG_DCB */
+ u32 tx_timeout_count;
+ unsigned long tx_timeout_last_recovery;
+ u32 tx_timeout_recovery_level;
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+index 87f91b750d59a..8c133a8be6add 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+@@ -136,7 +136,7 @@ ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
+ if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ return -EINVAL;
+
+- *num = IEEE_8021QAZ_MAX_TCS;
++ *num = pf->hw.func_caps.common_cap.maxtc;
+ return 0;
+ }
+
+@@ -160,6 +160,10 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+ {
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
++ /* if FW LLDP agent is running, DCBNL not allowed to change mode */
++ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
++ return ICE_DCB_NO_HW_CHG;
++
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 69c113a4de7e6..aebebd2102da0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -8,6 +8,7 @@
+ #include "ice_fltr.h"
+ #include "ice_lib.h"
+ #include "ice_dcb_lib.h"
++#include
+
+ struct ice_stats {
+ char stat_string[ETH_GSTRING_LEN];
+@@ -1238,6 +1239,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
+ status = ice_init_pf_dcb(pf, true);
+ if (status)
+ dev_warn(dev, "Fail to init DCB\n");
++
++ pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
++ pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
+ } else {
+ enum ice_status status;
+ bool dcbx_agent_status;
+@@ -1280,6 +1284,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
+ if (status)
+ dev_dbg(dev, "Fail to enable MIB change events\n");
+
++ pf->dcbx_cap &= ~DCB_CAP_DCBX_HOST;
++ pf->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
++
+ ice_nway_reset(netdev);
+ }
+ }
+@@ -3321,6 +3328,18 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+ ch->max_other = ch->other_count;
+ }
+
++/**
++ * ice_get_valid_rss_size - return valid number of RSS queues
++ * @hw: pointer to the HW structure
++ * @new_size: requested RSS queues
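++ *
++ * Return: the requested RSS size, capped at the device's RSS LUT size.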
++ */
++static int ice_get_valid_rss_size(struct ice_hw *hw, int new_size)
++{
++ struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
++
++ return min_t(int, new_size, BIT(caps->rss_table_entry_width));
++}
++
+ /**
+ * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size
+ * @vsi: VSI to reconfigure RSS LUT on
+@@ -3348,14 +3367,10 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
+ return -ENOMEM;
+
+ /* set RSS LUT parameters */
+- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
++ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ vsi->rss_size = 1;
+- } else {
+- struct ice_hw_common_caps *caps = &hw->func_caps.common_cap;
+-
+- vsi->rss_size = min_t(int, req_rss_size,
+- BIT(caps->rss_table_entry_width));
+- }
++ else
++ vsi->rss_size = ice_get_valid_rss_size(hw, req_rss_size);
+
+ /* create/set RSS LUT */
+ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+@@ -3434,9 +3449,12 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+
+- if (new_rx && !netif_is_rxfh_configured(dev))
++ if (!netif_is_rxfh_configured(dev))
+ return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+
++ /* Update rss_size due to change in Rx queues */
++ vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index ec7f6c64132ee..b3161c5def465 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -1878,6 +1878,29 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+ sizeof(struct virtchnl_version_info));
+ }
+
++/**
++ * ice_vc_get_max_frame_size - get max frame size allowed for VF
++ * @vf: VF used to determine max frame size
++ *
++ * Max frame size is determined based on the current port's max frame size and
++ * whether a port VLAN is configured on this VF. The VF is not aware whether
++ * it's in a port VLAN, so the PF needs to account for this both in max frame
++ * size checks and when sending the max frame size to the VF.
++ */
++static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
++{
++ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
++ struct ice_port_info *pi = vsi->port_info;
++ u16 max_frame_size;
++
++ max_frame_size = pi->phy.link_info.max_frame_size;
++
++ if (vf->port_vlan_info)
++ max_frame_size -= VLAN_HLEN;
++
++ return max_frame_size;
++}
++
+ /**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+@@ -1960,6 +1983,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+ vfres->max_vectors = pf->num_msix_per_vf;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
++ vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+@@ -2952,6 +2976,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
++ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
++
+ num_rxq++;
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+@@ -2964,7 +2990,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+- if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
++ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+@@ -2972,6 +2998,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ }
+
+ vsi->max_frame = qpi->rxq.max_pkt_size;
++ /* add space for the port VLAN since the VF driver is not
++ * expected to account for it in the MTU calculation
++ */
++ if (vf->port_vlan_info)
++ vsi->max_frame += VLAN_HLEN;
+ }
+
+ /* VF can request to configure less than allocated queues or default
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index bc4d8d1444019..fd5b33646ea71 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3432,7 +3432,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
+ return -ENOMEM;
+
+ /* Setup XPS mapping */
+- if (txq_number > 1)
++ if (pp->neta_armada3700)
++ cpu = 0;
++ else if (txq_number > 1)
+ cpu = txq->id % num_present_cpus();
+ else
+ cpu = pp->rxq_def % num_present_cpus();
+@@ -4210,6 +4212,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
+ node_online);
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
++ /* Armada 3700's per-cpu interrupt for mvneta is broken; all interrupts
++ * are routed to CPU 0, so we don't need all the cpu-hotplug support
++ */
++ if (pp->neta_armada3700)
++ return 0;
+
+ spin_lock(&pp->lock);
+ /*
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index d27543c1a166a..bb3fdaf337519 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -385,7 +385,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
+ u16 pcifunc;
+ int ret, lf;
+
+- cmd_buf = memdup_user(buffer, count);
++ cmd_buf = memdup_user(buffer, count + 1);
+ if (IS_ERR(cmd_buf))
+ return -ENOMEM;
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 394f43add85cf..a99e71bc7b3c9 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
+
+ if (!fs_rule->mirr_mbox) {
+ mlx4_err(dev, "rule mirroring mailbox is null\n");
++ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 3261d0dc11044..41474e42a819a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -128,6 +128,11 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ {
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
++ if (mlx5_lag_is_active(dev)) {
++ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
++ return -EOPNOTSUPP;
++ }
++
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ mlx5_unload_one(dev, false);
+@@ -273,6 +278,10 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+ return -EOPNOTSUPP;
+ }
++ if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
++ NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
++ return -EOPNOTSUPP;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+index 6bc6b48a56dc7..24e2c0d955b99 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+@@ -12,6 +12,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+
+ #include "lib/fs_chains.h"
+@@ -51,11 +52,11 @@ struct mlx5_tc_ct_priv {
+ struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_table *post_ct;
+ struct mutex control_lock; /* guards parallel adds/dels */
+- struct mutex shared_counter_lock;
+ struct mapping_ctx *zone_mapping;
+ struct mapping_ctx *labels_mapping;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_fs_chains *chains;
++ spinlock_t ht_lock; /* protects ft entries */
+ };
+
+ struct mlx5_ct_flow {
+@@ -124,6 +125,10 @@ struct mlx5_ct_counter {
+ bool is_shared;
+ };
+
++enum {
++ MLX5_CT_ENTRY_FLAG_VALID,
++};
++
+ struct mlx5_ct_entry {
+ struct rhash_head node;
+ struct rhash_head tuple_node;
+@@ -134,6 +139,12 @@ struct mlx5_ct_entry {
+ struct mlx5_ct_tuple tuple;
+ struct mlx5_ct_tuple tuple_nat;
+ struct mlx5_ct_zone_rule zone_rules[2];
++
++ struct mlx5_tc_ct_priv *ct_priv;
++ struct work_struct work;
++
++ refcount_t refcnt;
++ unsigned long flags;
+ };
+
+ static const struct rhashtable_params cts_ht_params = {
+@@ -740,6 +751,87 @@ err_attr:
+ return err;
+ }
+
++static bool
++mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
++{
++ return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
++}
++
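++/* Look up an entry by tuple, first in the plain tuple table and then in the
++ * NAT tuple table, taking a reference when the entry is valid. Returns NULL
++ * if no entry exists, or ERR_PTR(-EINVAL) if one exists but is invalid or
++ * going away.
++ */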
++static struct mlx5_ct_entry *
++mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
++{
++ struct mlx5_ct_entry *entry;
++
++ entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
++ tuples_ht_params);
++ if (entry && mlx5_tc_ct_entry_valid(entry) &&
++ refcount_inc_not_zero(&entry->refcnt)) {
++ return entry;
++ } else if (!entry) {
++ entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
++ tuple, tuples_nat_ht_params);
++ if (entry && mlx5_tc_ct_entry_valid(entry) &&
++ refcount_inc_not_zero(&entry->refcnt))
++ return entry;
++ }
++
++ return entry ? ERR_PTR(-EINVAL) : NULL;
++}
++
++static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
++{
++ struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
++
++ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
++ &entry->tuple_nat_node,
++ tuples_nat_ht_params);
++ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
++ tuples_ht_params);
++}
++
++static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
++{
++ struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
++
++ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
++
++ spin_lock_bh(&ct_priv->ht_lock);
++ mlx5_tc_ct_entry_remove_from_tuples(entry);
++ spin_unlock_bh(&ct_priv->ht_lock);
++
++ mlx5_tc_ct_counter_put(ct_priv, entry);
++ kfree(entry);
++}
++
++static void
++mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
++{
++ if (!refcount_dec_and_test(&entry->refcnt))
++ return;
++
++ mlx5_tc_ct_entry_del(entry);
++}
++
++static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
++{
++ struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
++
++ mlx5_tc_ct_entry_del(entry);
++}
++
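++/* Like mlx5_tc_ct_entry_put(), but the final delete is deferred to the
++ * driver workqueue instead of running inline.
++ */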
++static void
++__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
++{
++ struct mlx5e_priv *priv;
++
++ if (!refcount_dec_and_test(&entry->refcnt))
++ return;
++
++ priv = netdev_priv(entry->ct_priv->netdev);
++ INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
++ queue_work(priv->wq, &entry->work);
++}
++
+ static struct mlx5_ct_counter *
+ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+ {
+@@ -792,16 +884,26 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
+ }
+
+ /* Use the same counter as the reverse direction */
+- mutex_lock(&ct_priv->shared_counter_lock);
+- rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
+- tuples_ht_params);
+- if (rev_entry) {
+- if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+- mutex_unlock(&ct_priv->shared_counter_lock);
+- return rev_entry->counter;
+- }
++ spin_lock_bh(&ct_priv->ht_lock);
++ rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
++
++ if (IS_ERR(rev_entry)) {
++ spin_unlock_bh(&ct_priv->ht_lock);
++ goto create_counter;
++ }
++
++ if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
++ ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
++ shared_counter = rev_entry->counter;
++ spin_unlock_bh(&ct_priv->ht_lock);
++
++ mlx5_tc_ct_entry_put(rev_entry);
++ return shared_counter;
+ }
+- mutex_unlock(&ct_priv->shared_counter_lock);
++
++ spin_unlock_bh(&ct_priv->ht_lock);
++
++create_counter:
+
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter)) {
+@@ -866,10 +968,14 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ if (!meta_action)
+ return -EOPNOTSUPP;
+
+- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+- cts_ht_params);
+- if (entry)
+- return 0;
++ spin_lock_bh(&ct_priv->ht_lock);
++ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
++ if (entry && refcount_inc_not_zero(&entry->refcnt)) {
++ spin_unlock_bh(&ct_priv->ht_lock);
++ mlx5_tc_ct_entry_put(entry);
++ return -EEXIST;
++ }
++ spin_unlock_bh(&ct_priv->ht_lock);
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+@@ -878,6 +984,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ entry->tuple.zone = ft->zone;
+ entry->cookie = flow->cookie;
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
++ refcount_set(&entry->refcnt, 2);
++ entry->ct_priv = ct_priv;
+
+ err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
+ if (err)
+@@ -888,35 +996,40 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
+ if (err)
+ goto err_set;
+
+- err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
+- &entry->tuple_node,
+- tuples_ht_params);
++ spin_lock_bh(&ct_priv->ht_lock);
++
++ err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
++ cts_ht_params);
++ if (err)
++ goto err_entries;
++
++ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
++ &entry->tuple_node,
++ tuples_ht_params);
+ if (err)
+ goto err_tuple;
+
+ if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+- err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
+- &entry->tuple_nat_node,
+- tuples_nat_ht_params);
++ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
++ &entry->tuple_nat_node,
++ tuples_nat_ht_params);
+ if (err)
+ goto err_tuple_nat;
+ }
++ spin_unlock_bh(&ct_priv->ht_lock);
+
+ err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
+ ft->zone_restore_id);
+ if (err)
+ goto err_rules;
+
+- err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
+- cts_ht_params);
+- if (err)
+- goto err_insert;
++ set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
++ mlx5_tc_ct_entry_put(entry); /* this function reference */
+
+ return 0;
+
+-err_insert:
+- mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ err_rules:
++ spin_lock_bh(&ct_priv->ht_lock);
+ if (mlx5_tc_ct_entry_has_nat(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node, tuples_nat_ht_params);
+@@ -925,47 +1038,43 @@ err_tuple_nat:
+ &entry->tuple_node,
+ tuples_ht_params);
+ err_tuple:
++ rhashtable_remove_fast(&ft->ct_entries_ht,
++ &entry->node,
++ cts_ht_params);
++err_entries:
++ spin_unlock_bh(&ct_priv->ht_lock);
+ err_set:
+ kfree(entry);
+- netdev_warn(ct_priv->netdev,
+- "Failed to offload ct entry, err: %d\n", err);
++ if (err != -EEXIST)
++ netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
+ return err;
+ }
+
+-static void
+-mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
+- struct mlx5_ct_entry *entry)
+-{
+- mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+- mutex_lock(&ct_priv->shared_counter_lock);
+- if (mlx5_tc_ct_entry_has_nat(entry))
+- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+- &entry->tuple_nat_node,
+- tuples_nat_ht_params);
+- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+- tuples_ht_params);
+- mutex_unlock(&ct_priv->shared_counter_lock);
+- mlx5_tc_ct_counter_put(ct_priv, entry);
+-
+-}
+-
+ static int
+ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+ {
++ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ unsigned long cookie = flow->cookie;
+ struct mlx5_ct_entry *entry;
+
+- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+- cts_ht_params);
+- if (!entry)
++ spin_lock_bh(&ct_priv->ht_lock);
++ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
++ if (!entry) {
++ spin_unlock_bh(&ct_priv->ht_lock);
+ return -ENOENT;
++ }
+
+- mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
+- WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
+- &entry->node,
+- cts_ht_params));
+- kfree(entry);
++ if (!mlx5_tc_ct_entry_valid(entry)) {
++ spin_unlock_bh(&ct_priv->ht_lock);
++ return -EINVAL;
++ }
++
++ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
++ mlx5_tc_ct_entry_remove_from_tuples(entry);
++ spin_unlock_bh(&ct_priv->ht_lock);
++
++ mlx5_tc_ct_entry_put(entry);
+
+ return 0;
+ }
+@@ -974,19 +1083,30 @@ static int
+ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *f)
+ {
++ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ unsigned long cookie = f->cookie;
+ struct mlx5_ct_entry *entry;
+ u64 lastuse, packets, bytes;
+
+- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
+- cts_ht_params);
+- if (!entry)
++ spin_lock_bh(&ct_priv->ht_lock);
++ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
++ if (!entry) {
++ spin_unlock_bh(&ct_priv->ht_lock);
+ return -ENOENT;
++ }
++
++ if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
++ spin_unlock_bh(&ct_priv->ht_lock);
++ return -EINVAL;
++ }
++
++ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
+ flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+
++ mlx5_tc_ct_entry_put(entry);
+ return 0;
+ }
+
+@@ -1478,11 +1598,9 @@ err_mapping:
+ static void
+ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
+ {
+- struct mlx5_tc_ct_priv *ct_priv = arg;
+ struct mlx5_ct_entry *entry = ptr;
+
+- mlx5_tc_ct_del_ft_entry(ct_priv, entry);
+- kfree(entry);
++ mlx5_tc_ct_entry_put(entry);
+ }
+
+ static void
+@@ -1960,6 +2078,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ goto err_mapping_labels;
+ }
+
++ spin_lock_init(&ct_priv->ht_lock);
+ ct_priv->ns_type = ns_type;
+ ct_priv->chains = chains;
+ ct_priv->netdev = priv->netdev;
+@@ -1994,7 +2113,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+
+ idr_init(&ct_priv->fte_ids);
+ mutex_init(&ct_priv->control_lock);
+- mutex_init(&ct_priv->shared_counter_lock);
+ rhashtable_init(&ct_priv->zone_ht, &zone_params);
+ rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
+ rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
+@@ -2037,7 +2155,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
+ rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
+ rhashtable_destroy(&ct_priv->zone_ht);
+ mutex_destroy(&ct_priv->control_lock);
+- mutex_destroy(&ct_priv->shared_counter_lock);
+ idr_destroy(&ct_priv->fte_ids);
+ kfree(ct_priv);
+ }
+@@ -2059,14 +2176,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
+ if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
+ return false;
+
+- entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
+- tuples_ht_params);
+- if (!entry)
+- entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+- &tuple, tuples_nat_ht_params);
+- if (!entry)
++ spin_lock(&ct_priv->ht_lock);
++
++ entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
++ if (!entry) {
++ spin_unlock(&ct_priv->ht_lock);
++ return false;
++ }
++
++ if (IS_ERR(entry)) {
++ spin_unlock(&ct_priv->ht_lock);
+ return false;
++ }
++ spin_unlock(&ct_priv->ht_lock);
+
+ tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
++ __mlx5_tc_ct_entry_put(entry);
++
+ return true;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+index d487e5e371625..8d991c3b7a503 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+@@ -83,7 +83,7 @@ static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+
+ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+ /* Let other device's napi(s) and XSK wakeups see our new state. */
+- synchronize_rcu();
++ synchronize_net();
+ }
+
+ static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index d87c345878d3d..f4bce1365639e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -111,7 +111,7 @@ err_free_cparam:
+ void mlx5e_close_xsk(struct mlx5e_channel *c)
+ {
+ clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+- synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
++ synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
+
+ mlx5e_close_rq(&c->xskrq);
+ mlx5e_close_cq(&c->xskrq.cq);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+index 1fae7fab8297e..ff81b69a59a9b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+@@ -173,7 +173,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ #endif
+
+ #if IS_ENABLED(CONFIG_GENEVE)
+- if (skb->encapsulation)
++ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
+ #endif
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+index 6a1d82503ef8f..d06532d0baa43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+@@ -57,6 +57,20 @@ struct mlx5e_ktls_offload_context_rx {
+ struct mlx5e_ktls_rx_resync_ctx resync;
+ };
+
++static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
++{
++ if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
++ return false;
++
++ kfree(priv_rx);
++ return true;
++}
++
++static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
++{
++ refcount_inc(&priv_rx->resync.refcnt);
++}
++
+ static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
+ {
+ int err, inlen;
+@@ -326,7 +340,7 @@ static void resync_handle_work(struct work_struct *work)
+ priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
+
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+- refcount_dec(&resync->refcnt);
++ mlx5e_ktls_priv_rx_put(priv_rx);
+ return;
+ }
+
+@@ -334,7 +348,7 @@ static void resync_handle_work(struct work_struct *work)
+ sq = &c->async_icosq;
+
+ if (resync_post_get_progress_params(sq, priv_rx))
+- refcount_dec(&resync->refcnt);
++ mlx5e_ktls_priv_rx_put(priv_rx);
+ }
+
+ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
+@@ -377,7 +391,11 @@ unlock:
+ return err;
+ }
+
+-/* Function is called with elevated refcount, it decreases it. */
++/* Function can be called with the refcount being either elevated or not.
++ * It decreases the refcount and may free the kTLS priv context.
++ * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
++ * already in flight.
++ */
+ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
+ struct mlx5e_icosq *sq)
+ {
+@@ -410,7 +428,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
+ tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ priv_rx->stats->tls_resync_req_end++;
+ out:
+- refcount_dec(&resync->refcnt);
++ mlx5e_ktls_priv_rx_put(priv_rx);
+ dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+ kfree(buf);
+ }
+@@ -431,9 +449,9 @@ static bool resync_queue_get_psv(struct sock *sk)
+ return false;
+
+ resync = &priv_rx->resync;
+- refcount_inc(&resync->refcnt);
++ mlx5e_ktls_priv_rx_get(priv_rx);
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+- refcount_dec(&resync->refcnt);
++ mlx5e_ktls_priv_rx_put(priv_rx);
+
+ return true;
+ }
+@@ -625,31 +643,6 @@ err_create_key:
+ return err;
+ }
+
+-/* Elevated refcount on the resync object means there are
+- * outstanding operations (uncompleted GET_PSV WQEs) that
+- * will read the resync / priv_rx objects once completed.
+- * Wait for them to avoid use-after-free.
+- */
+-static void wait_for_resync(struct net_device *netdev,
+- struct mlx5e_ktls_rx_resync_ctx *resync)
+-{
+-#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
+- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
+- unsigned int refcnt;
+-
+- do {
+- refcnt = refcount_read(&resync->refcnt);
+- if (refcnt == 1)
+- return;
+-
+- msleep(20);
+- } while (time_before(jiffies, exp_time));
+-
+- netdev_warn(netdev,
+- "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
+- refcnt);
+-}
+-
+ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ {
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+@@ -663,7 +656,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
+ set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
+ mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
+- synchronize_rcu(); /* Sync with NAPI */
++ synchronize_net(); /* Sync with NAPI */
+ if (!cancel_work_sync(&priv_rx->rule.work))
+ /* completion is needed, as the priv_rx in the add flow
+ * is maintained on the wqe info (wi), not on the socket.
+@@ -671,8 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+ wait_for_completion(&priv_rx->add_ctx);
+ resync = &priv_rx->resync;
+ if (cancel_work_sync(&resync->work))
+- refcount_dec(&resync->refcnt);
+- wait_for_resync(netdev, resync);
++ mlx5e_ktls_priv_rx_put(priv_rx);
+
+ priv_rx->stats->tls_del++;
+ if (priv_rx->rule.rule)
+@@ -680,5 +672,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+
+ mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+ mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+- kfree(priv_rx);
++ /* priv_rx should normally be freed here, but if there is an outstanding
++ * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
++ * processed.
++ */
++ mlx5e_ktls_priv_rx_put(priv_rx);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 302001d6661ea..8612c388db7d3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -525,7 +525,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
+ #define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
+
+ static void
+-mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
++mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int tc;
+@@ -540,6 +540,17 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
++ }
++}
++
++static void
++mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
++{
++ struct mlx5_core_dev *mdev = priv->mdev;
++ int i;
++
++ for (i = 0; i < priv->channels.num; ++i) {
++ struct mlx5e_channel *c = priv->channels.c[i];
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+@@ -586,21 +597,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ tx_moder->pkts = coal->tx_max_coalesced_frames;
+ new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
+- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+- priv->channels.params = new_channels.params;
+- goto out;
+- }
+- /* we are opened */
+-
+ reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+
+- if (!reset_rx && !reset_tx) {
+- mlx5e_set_priv_channels_coalesce(priv, coal);
+- priv->channels.params = new_channels.params;
+- goto out;
+- }
+-
+ if (reset_rx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER);
+@@ -614,6 +613,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
+ mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ }
+
++ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
++ priv->channels.params = new_channels.params;
++ goto out;
++ }
++
++ if (!reset_rx && !reset_tx) {
++ if (!coal->use_adaptive_rx_coalesce)
++ mlx5e_set_priv_channels_rx_coalesce(priv, coal);
++ if (!coal->use_adaptive_tx_coalesce)
++ mlx5e_set_priv_channels_tx_coalesce(priv, coal);
++ priv->channels.params = new_channels.params;
++ goto out;
++ }
++
+ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
+
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3fc7d18ac868b..a2e0b548bf570 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -65,6 +65,7 @@
+ #include "en/devlink.h"
+ #include "lib/mlx5.h"
+ #include "en/ptp.h"
++#include "fpga/ipsec.h"
+
+ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+ {
+@@ -106,7 +107,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ return false;
+
+- if (MLX5_IPSEC_DEV(mdev))
++ if (mlx5_fpga_is_ipsec_device(mdev))
+ return false;
+
+ if (params->xdp_prog) {
+@@ -914,7 +915,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
+ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
+ {
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+- synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
++ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+ }
+
+ void mlx5e_close_rq(struct mlx5e_rq *rq)
+@@ -1348,7 +1349,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+- synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
++ synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
+
+ mlx5e_tx_disable_queue(sq->txq);
+
+@@ -1423,7 +1424,7 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
+ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+ {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+- synchronize_rcu(); /* Sync with NAPI. */
++ synchronize_net(); /* Sync with NAPI. */
+ }
+
+ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+@@ -1502,7 +1503,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+ struct mlx5e_channel *c = sq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+- synchronize_rcu(); /* Sync with NAPI. */
++ synchronize_net(); /* Sync with NAPI. */
+
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
+ mlx5e_free_xdpsq_descs(sq);
+@@ -1826,12 +1827,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
+
+ mlx5e_build_create_cq_param(&ccp, c);
+
+- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
++ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ &c->async_icosq.cq);
+ if (err)
+ return err;
+
+- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
++ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ &c->icosq.cq);
+ if (err)
+ goto err_close_async_icosq_cq;
+@@ -2069,7 +2070,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
+ int i;
+
+ #ifdef CONFIG_MLX5_EN_IPSEC
+- if (MLX5_IPSEC_DEV(mdev))
++ if (mlx5_fpga_is_ipsec_device(mdev))
+ byte_count += MLX5E_METADATA_ETHER_LEN;
+ #endif
+
+@@ -4455,8 +4456,9 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+ return -EINVAL;
+ }
+
+- if (MLX5_IPSEC_DEV(priv->mdev)) {
+- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
++ if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
++ netdev_warn(netdev,
++ "XDP is not available on Innova cards with IPsec support\n");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index ca4b55839a8a7..4864deed9dc94 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1795,8 +1795,8 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
+
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+ #ifdef CONFIG_MLX5_EN_IPSEC
+- if (MLX5_IPSEC_DEV(mdev)) {
+- netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
++ if (mlx5_fpga_is_ipsec_device(mdev)) {
++ netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
+ return -EINVAL;
+ }
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index dd0bfbacad474..717fbaa6ce736 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -5040,7 +5040,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
+ */
+ if (rate) {
+ rate = (rate * BITS_PER_BYTE) + 500000;
+- rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
++ rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
+ }
+
+ err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+index cc67366495b09..22bee49902327 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+@@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec {
+ struct ida halloc;
+ };
+
+-static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
++bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+ {
+ if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
+ return false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+index db88eb4c49e34..8931b55844773 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+@@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
+ const struct mlx5_flow_cmds *
+ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+ void mlx5_fpga_ipsec_build_fs_cmds(void);
++bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
+ #else
+ static inline
+ const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
+@@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
+ }
+
+ static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
++static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
+
+ #endif /* CONFIG_MLX5_FPGA_IPSEC */
+ #endif /* __MLX5_FPGA_IPSEC_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 54523bed16cd3..0c32c485eb588 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -190,6 +190,16 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
+ return true;
+ }
+
++static void enter_error_state(struct mlx5_core_dev *dev, bool force)
++{
++ if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
++ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
++ mlx5_cmd_flush(dev);
++ }
++
++ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
++}
++
+ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
+ {
+ bool err_detected = false;
+@@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
+ goto unlock;
+ }
+
+- if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
+- dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+- mlx5_cmd_flush(dev);
+- }
+-
+- mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
++ enter_error_state(dev, force);
+ unlock:
+ mutex_unlock(&dev->intf_state_mutex);
+ }
+@@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+- mlx5_enter_error_state(dev, false);
++ enter_error_state(dev, false);
+ if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ if (mlx5_health_try_recover(dev))
+ mlx5_core_err(dev, "health recovery failed\n");
+@@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t)
+ mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+ dev->priv.health.fatal_error = fatal_error;
+ print_health_info(dev);
++ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mlx5_trigger_health_work(dev);
+- goto out;
++ return;
+ }
+
+ count = ioread32be(health->health_counter);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index ca6f2fc39ea0a..ba1a4ae28097d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1396,7 +1396,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+ pci_save_state(pdev);
+- devlink_reload_enable(devlink);
++ if (!mlx5_core_is_mp_slave(dev))
++ devlink_reload_enable(devlink);
+ return 0;
+
+ err_load_one:
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 0d78408b4e269..470ff6b3ebef1 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2315,14 +2315,14 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
+
+ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+ {
+- RTL_W8(tp, MaxTxPacketSize, 0x3f);
++ RTL_W8(tp, MaxTxPacketSize, 0x24);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
+ }
+
+ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
+ {
+- RTL_W8(tp, MaxTxPacketSize, 0x0c);
++ RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+index f184b00f51166..5f500141567d0 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -301,7 +301,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+ return -EINVAL;
+ }
+
+- if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
++ if (delay_config & PRG_ETH0_ADJ_ENABLE) {
+ if (!dwmac->timing_adj_clk) {
+ dev_err(dwmac->dev,
+ "The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 56985542e2029..44bb133c30007 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -316,6 +316,32 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ if (!priv->dma_cap.av)
+ return -EOPNOTSUPP;
+
++ /* Port Transmit Rate and Speed Divider */
++ switch (priv->speed) {
++ case SPEED_10000:
++ ptr = 32;
++ speed_div = 10000000;
++ break;
++ case SPEED_5000:
++ ptr = 32;
++ speed_div = 5000000;
++ break;
++ case SPEED_2500:
++ ptr = 8;
++ speed_div = 2500000;
++ break;
++ case SPEED_1000:
++ ptr = 8;
++ speed_div = 1000000;
++ break;
++ case SPEED_100:
++ ptr = 4;
++ speed_div = 100000;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
++
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+@@ -332,10 +358,6 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
+
+- /* Port Transmit Rate and Speed Divider */
+- ptr = (priv->speed == SPEED_100) ? 4 : 8;
+- speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
+-
+ /* Final adjustments for HW */
+ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 6fea980acf646..b4a0bfce5b762 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1817,6 +1817,18 @@ static int axienet_probe(struct platform_device *pdev)
+ lp->options = XAE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
++
++ lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
++ if (IS_ERR(lp->clk)) {
++ ret = PTR_ERR(lp->clk);
++ goto free_netdev;
++ }
++ ret = clk_prepare_enable(lp->clk);
++ if (ret) {
++ dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
++ goto free_netdev;
++ }
++
+ /* Map device registers */
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+@@ -1992,20 +2004,6 @@ static int axienet_probe(struct platform_device *pdev)
+
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node) {
+- lp->clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(lp->clk)) {
+- dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+- PTR_ERR(lp->clk));
+- lp->clk = NULL;
+- } else {
+- ret = clk_prepare_enable(lp->clk);
+- if (ret) {
+- dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+- ret);
+- goto free_netdev;
+- }
+- }
+-
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 4c04e271f1844..fd3c2d86e48b1 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -539,7 +539,6 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(iph->tot_len)) {
+ netdev_dbg(dev, "packet too big, fragmentation needed\n");
+- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ goto err_rt;
+diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
+index 84bb8ae927252..eb1c8396bcdd9 100644
+--- a/drivers/net/ipa/ipa_main.c
++++ b/drivers/net/ipa/ipa_main.c
+@@ -581,10 +581,10 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
+ return -EINVAL;
+
+ for (i = 0; i < data->resource_src_count; i++)
+- ipa_resource_config_src(ipa, data->resource_src);
++ ipa_resource_config_src(ipa, &data->resource_src[i]);
+
+ for (i = 0; i < data->resource_dst_count; i++)
+- ipa_resource_config_dst(ipa, data->resource_dst);
++ ipa_resource_config_dst(ipa, &data->resource_dst[i]);
+
+ return 0;
+ }
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 54e0d75203dac..57f8021b70af5 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1295,6 +1295,7 @@ static struct phy_driver ksphy_driver[] = {
+ .driver_data = &ksz8081_type,
+ .probe = kszphy_probe,
+ .config_init = ksz8081_config_init,
++ .soft_reset = genphy_soft_reset,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
+ .get_sset_count = kszphy_get_sset_count,
+diff --git a/drivers/net/phy/mscc/Makefile b/drivers/net/phy/mscc/Makefile
+index d8e22a4eeeffa..78d84194f79ae 100644
+--- a/drivers/net/phy/mscc/Makefile
++++ b/drivers/net/phy/mscc/Makefile
+@@ -4,6 +4,7 @@
+
+ obj-$(CONFIG_MICROSEMI_PHY) := mscc.o
+ mscc-objs := mscc_main.o
++mscc-objs += mscc_serdes.o
+
+ ifdef CONFIG_MACSEC
+ mscc-objs += mscc_macsec.o
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 9481bce94c2ed..a50235fdf7d99 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -102,6 +102,7 @@ enum rgmii_clock_delay {
+ #define PHY_MCB_S6G_READ BIT(30)
+
+ #define PHY_S6G_PLL5G_CFG0 0x06
++#define PHY_S6G_PLL5G_CFG2 0x08
+ #define PHY_S6G_LCPLL_CFG 0x11
+ #define PHY_S6G_PLL_CFG 0x2b
+ #define PHY_S6G_COMMON_CFG 0x2c
+@@ -121,6 +122,9 @@ enum rgmii_clock_delay {
+ #define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
+ #define PHY_S6G_PLL_FSM_ENA_POS 7
+
++#define PHY_S6G_CFG2_FSM_DIS 1
++#define PHY_S6G_CFG2_FSM_CLK_BP 23
++
+ #define MSCC_EXT_PAGE_ACCESS 31
+ #define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
+ #define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
+@@ -136,6 +140,10 @@ enum rgmii_clock_delay {
+ #define MSCC_PHY_PAGE_1588 0x1588 /* PTP (1588) */
+ #define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */
+ #define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */
++#define MSCC_PHY_GPIO_CONTROL_2 14
++
++#define MSCC_PHY_COMA_MODE 0x2000 /* input(1) / output(0) */
++#define MSCC_PHY_COMA_OUTPUT 0x1000 /* value to output */
+
+ /* Extended Page 1 Registers */
+ #define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18
+@@ -335,6 +343,10 @@ enum rgmii_clock_delay {
+ #define VSC8584_REVB 0x0001
+ #define MSCC_DEV_REV_MASK GENMASK(3, 0)
+
++#define MSCC_ROM_TRAP_SERDES_6G_CFG 0x1E48
++#define MSCC_RAM_TRAP_SERDES_6G_CFG 0x1E4F
++#define PATCH_VEC_ZERO_EN 0x0100
++
+ struct reg_val {
+ u16 reg;
+ u32 val;
+@@ -412,6 +424,22 @@ struct vsc8531_edge_rate_table {
+ };
+ #endif /* CONFIG_OF_MDIO */
+
++enum csr_target {
++ MACRO_CTRL = 0x07,
++};
++
++u32 vsc85xx_csr_read(struct phy_device *phydev,
++ enum csr_target target, u32 reg);
++
++int vsc85xx_csr_write(struct phy_device *phydev,
++ enum csr_target target, u32 reg, u32 val);
++
++int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val);
++int phy_base_read(struct phy_device *phydev, u32 regnum);
++int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb);
++int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb);
++int vsc8584_cmd(struct phy_device *phydev, u16 val);
++
+ #if IS_ENABLED(CONFIG_MACSEC)
+ int vsc8584_macsec_init(struct phy_device *phydev);
+ void vsc8584_handle_macsec_interrupt(struct phy_device *phydev);
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 2f2157e3deab5..3a7705228ed59 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -17,7 +17,7 @@
+ #include
+ #include
+ #include
+-
++#include "mscc_serdes.h"
+ #include "mscc.h"
+
+ static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = {
+@@ -689,7 +689,7 @@ out_unlock:
+ }
+
+ /* phydev->bus->mdio_lock should be locked when using this function */
+-static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
++int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+ {
+ if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+@@ -700,7 +700,7 @@ static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+ }
+
+ /* phydev->bus->mdio_lock should be locked when using this function */
+-static int phy_base_read(struct phy_device *phydev, u32 regnum)
++int phy_base_read(struct phy_device *phydev, u32 regnum)
+ {
+ if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) {
+ dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n");
+@@ -710,6 +710,113 @@ static int phy_base_read(struct phy_device *phydev, u32 regnum)
+ return __phy_package_read(phydev, regnum);
+ }
+
++u32 vsc85xx_csr_read(struct phy_device *phydev,
++ enum csr_target target, u32 reg)
++{
++ unsigned long deadline;
++ u32 val, val_l, val_h;
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
++
++ /* CSR registers are grouped under different Target IDs.
++ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
++ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
++ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
++ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
++ */
++
++ /* Setup the Target ID */
++ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
++ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
++
++ if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
++ /* non-MACsec access */
++ target &= 0x3;
++ else
++ target = 0;
++
++ /* Trigger CSR Action - Read into the CSR's */
++ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
++ MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
++ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
++ MSCC_PHY_CSR_CNTL_19_TARGET(target));
++
++ /* Wait for register access */
++ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++ do {
++ usleep_range(500, 1000);
++ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
++ } while (time_before(jiffies, deadline) &&
++ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
++
++ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
++ return 0xffffffff;
++
++ /* Read the Least Significant Word (LSW) (17) */
++ val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
++
++ /* Read the Most Significant Word (MSW) (18) */
++ val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++ MSCC_PHY_PAGE_STANDARD);
++
++ return (val_h << 16) | val_l;
++}
++
++int vsc85xx_csr_write(struct phy_device *phydev,
++ enum csr_target target, u32 reg, u32 val)
++{
++ unsigned long deadline;
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
++
++ /* CSR registers are grouped under different Target IDs.
++ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
++ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
++ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
++ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
++ */
++
++ /* Setup the Target ID */
++ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
++ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
++
++ /* Write the Least Significant Word (LSW) (17) */
++ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
++
++ /* Write the Most Significant Word (MSW) (18) */
++ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
++
++ if ((target >> 2 == 0x1) || (target >> 2 == 0x3))
++ /* non-MACsec access */
++ target &= 0x3;
++ else
++ target = 0;
++
++ /* Trigger CSR Action - Write into the CSR's */
++ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
++ MSCC_PHY_CSR_CNTL_19_CMD |
++ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
++ MSCC_PHY_CSR_CNTL_19_TARGET(target));
++
++ /* Wait for register access */
++ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++ do {
++ usleep_range(500, 1000);
++ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
++ } while (time_before(jiffies, deadline) &&
++ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
++
++ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
++ return -ETIMEDOUT;
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++ MSCC_PHY_PAGE_STANDARD);
++
++ return 0;
++}
++
+ /* bus->mdio_lock should be locked when using this function */
+ static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+ {
+@@ -719,7 +826,7 @@ static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val)
+ }
+
+ /* bus->mdio_lock should be locked when using this function */
+-static int vsc8584_cmd(struct phy_device *phydev, u16 val)
++int vsc8584_cmd(struct phy_device *phydev, u16 val)
+ {
+ unsigned long deadline;
+ u16 reg_val;
+@@ -1131,6 +1238,92 @@ out:
+ return ret;
+ }
+
++/* Access LCPLL Cfg_2 */
++static void vsc8584_pll5g_cfg2_wr(struct phy_device *phydev,
++ bool disable_fsm)
++{
++ u32 rd_dat;
++
++ rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
++ rd_dat &= ~BIT(PHY_S6G_CFG2_FSM_DIS);
++ rd_dat |= (disable_fsm << PHY_S6G_CFG2_FSM_DIS);
++ vsc85xx_csr_write(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2, rd_dat);
++}
++
++/* trigger a read to the spcified MCB */
++static int vsc8584_mcb_rd_trig(struct phy_device *phydev,
++ u32 mcb_reg_addr, u8 mcb_slave_num)
++{
++ u32 rd_dat = 0;
++
++ /* read MCB */
++ vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
++ (0x40000000 | (1L << mcb_slave_num)));
++
++ return read_poll_timeout(vsc85xx_csr_read, rd_dat,
++ !(rd_dat & 0x40000000),
++ 4000, 200000, 0,
++ phydev, MACRO_CTRL, mcb_reg_addr);
++}
++
++/* Trigger a write to the specified MCB */
++static int vsc8584_mcb_wr_trig(struct phy_device *phydev,
++ u32 mcb_reg_addr,
++ u8 mcb_slave_num)
++{
++ u32 rd_dat = 0;
++
++ /* write back MCB */
++ vsc85xx_csr_write(phydev, MACRO_CTRL, mcb_reg_addr,
++ (0x80000000 | (1L << mcb_slave_num)));
++
++ return read_poll_timeout(vsc85xx_csr_read, rd_dat,
++ !(rd_dat & 0x80000000),
++ 4000, 200000, 0,
++ phydev, MACRO_CTRL, mcb_reg_addr);
++}
++
++/* Sequence to Reset LCPLL for the VIPER and ELISE PHY */
++static int vsc8584_pll5g_reset(struct phy_device *phydev)
++{
++ bool dis_fsm;
++ int ret = 0;
++
++ ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
++ if (ret < 0)
++ goto done;
++ dis_fsm = 1;
++
++ /* Reset LCPLL */
++ vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
++
++ /* write back LCPLL MCB */
++ ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
++ if (ret < 0)
++ goto done;
++
++ /* 10 ms sleep while the LCPLL is held in reset */
++ usleep_range(10000, 20000);
++
++ /* read LCPLL MCB into CSRs */
++ ret = vsc8584_mcb_rd_trig(phydev, 0x11, 0);
++ if (ret < 0)
++ goto done;
++ dis_fsm = 0;
++
++ /* Release the Reset of LCPLL */
++ vsc8584_pll5g_cfg2_wr(phydev, dis_fsm);
++
++ /* write back LCPLL MCB */
++ ret = vsc8584_mcb_wr_trig(phydev, 0x11, 0);
++ if (ret < 0)
++ goto done;
++
++ usleep_range(110000, 200000);
++done:
++ return ret;
++}
++
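
[Editor's aside: the reset sequence above is one instance of a general read-modify-write pattern over an MCB slave — pull the slave's registers into the CSR shadow, edit them, write the shadow back, and poll for command completion on both triggers. A minimal sketch reusing the two trigger helpers from this hunk; the modify callback is a hypothetical stand-in.]

    /* Sketch only: generic RMW over an MCB slave using the helpers above. */
    static int vsc8584_mcb_rmw(struct phy_device *phydev, u32 mcb_reg, u8 slave,
                               void (*modify)(struct phy_device *phydev))
    {
            int ret;

            ret = vsc8584_mcb_rd_trig(phydev, mcb_reg, slave);  /* MCB -> CSRs */
            if (ret < 0)
                    return ret;
            modify(phydev);                                     /* edit CSR shadow */
            return vsc8584_mcb_wr_trig(phydev, mcb_reg, slave); /* CSRs -> MCB */
    }
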
+ /* bus->mdio_lock should be locked when using this function */
+ static int vsc8584_config_pre_init(struct phy_device *phydev)
+ {
+@@ -1323,6 +1516,21 @@ static void vsc8584_get_base_addr(struct phy_device *phydev)
+ vsc8531->addr = addr;
+ }
+
++static void vsc85xx_coma_mode_release(struct phy_device *phydev)
++{
++ /* The coma mode (pin or reg) provides an optional feature that
++ * may be used to control when the PHYs become active.
++ * Alternatively the COMA_MODE pin may be connected low
++ * so that the PHYs are fully active once out of reset.
++ */
++
++ /* Enable output (mode=0) and write zero to it */
++ vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_EXTENDED_GPIO);
++ __phy_modify(phydev, MSCC_PHY_GPIO_CONTROL_2,
++ MSCC_PHY_COMA_MODE | MSCC_PHY_COMA_OUTPUT, 0);
++ vsc85xx_phy_write_page(phydev, MSCC_PHY_PAGE_STANDARD);
++}
++
+ static int vsc8584_config_init(struct phy_device *phydev)
+ {
+ struct vsc8531_private *vsc8531 = phydev->priv;
+@@ -1541,6 +1749,100 @@ static int vsc85xx_config_init(struct phy_device *phydev)
+ return 0;
+ }
+
++static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
++ u32 op)
++{
++ unsigned long deadline;
++ u32 val;
++ int ret;
++
++ ret = vsc85xx_csr_write(phydev, PHY_MCB_TARGET, reg,
++ op | (1 << mcb));
++ if (ret)
++ return -EINVAL;
++
++ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++ do {
++ usleep_range(500, 1000);
++ val = vsc85xx_csr_read(phydev, PHY_MCB_TARGET, reg);
++
++ if (val == 0xffffffff)
++ return -EIO;
++
++ } while (time_before(jiffies, deadline) && (val & op));
++
++ if (val & op)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++/* Trigger a read to the specified MCB */
++int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
++{
++ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
++}
++
++/* Trigger a write to the specified MCB */
++int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
++{
++ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
++}
++
++static int vsc8514_config_host_serdes(struct phy_device *phydev)
++{
++ int ret;
++ u16 val;
++
++ ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++ MSCC_PHY_PAGE_EXTENDED_GPIO);
++ if (ret)
++ return ret;
++
++ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
++ val &= ~MAC_CFG_MASK;
++ val |= MAC_CFG_QSGMII;
++ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
++ if (ret)
++ return ret;
++
++ ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
++ MSCC_PHY_PAGE_STANDARD);
++ if (ret)
++ return ret;
++
++ ret = vsc8584_cmd(phydev, PROC_CMD_NOP);
++ if (ret)
++ return ret;
++
++ ret = vsc8584_cmd(phydev,
++ PROC_CMD_MCB_ACCESS_MAC_CONF |
++ PROC_CMD_RST_CONF_PORT |
++ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
++ if (ret) {
++ dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
++ __func__, ret);
++ return ret;
++ }
++
++ /* Apply 6G SerDes FOJI Algorithm
++ * Initial condition requirement:
++ * 1. hold 8051 in reset
++ * 2. disable patch vector 0, in order to allow IB cal poll during FoJi
++ * 3. deassert 8051 reset after change patch vector status
++ * 4. proceed with FoJi (vsc85xx_sd6g_config_v2)
++ */
++ vsc8584_micro_assert_reset(phydev);
++ val = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
++ /* clear bit 8, to disable patch vector 0 */
++ val &= ~PATCH_VEC_ZERO_EN;
++ ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, val);
++ /* Enable 8051 clock, don't set patch present, disable PRAM clock override */
++ vsc8584_micro_deassert_reset(phydev, false);
++
++ return vsc85xx_sd6g_config_v2(phydev);
++}
++
+ static int vsc8514_config_pre_init(struct phy_device *phydev)
+ {
+ /* These are the settings to override the silicon default
+@@ -1569,8 +1871,16 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
++ struct device *dev = &phydev->mdio.dev;
+ unsigned int i;
+ u16 reg;
++ int ret;
++
++ ret = vsc8584_pll5g_reset(phydev);
++ if (ret < 0) {
++ dev_err(dev, "failed LCPLL reset, ret: %d\n", ret);
++ return ret;
++ }
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+@@ -1602,151 +1912,48 @@ static int vsc8514_config_pre_init(struct phy_device *phydev)
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+- return 0;
+-}
+-
+-static u32 vsc85xx_csr_ctrl_phy_read(struct phy_device *phydev,
+- u32 target, u32 reg)
+-{
+- unsigned long deadline;
+- u32 val, val_l, val_h;
+-
+- phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+-
+- /* CSR registers are grouped under different Target IDs.
+- * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+- * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+- * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+- * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+- */
+-
+- /* Setup the Target ID */
+- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+- MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+-
+- /* Trigger CSR Action - Read into the CSR's */
+- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+- MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
+- MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+- MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+-
+- /* Wait for register access*/
+- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+- do {
+- usleep_range(500, 1000);
+- val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+- } while (time_before(jiffies, deadline) &&
+- !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+-
+- if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+- return 0xffffffff;
+-
+- /* Read the Least Significant Word (LSW) (17) */
+- val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
+-
+- /* Read the Most Significant Word (MSW) (18) */
+- val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
+-
+- phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+- MSCC_PHY_PAGE_STANDARD);
+-
+- return (val_h << 16) | val_l;
+-}
+-
+-static int vsc85xx_csr_ctrl_phy_write(struct phy_device *phydev,
+- u32 target, u32 reg, u32 val)
+-{
+- unsigned long deadline;
+-
+- phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+-
+- /* CSR registers are grouped under different Target IDs.
+- * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+- * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+- * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+- * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
++ /* Add pre-patching commands to:
++ * 1. enable 8051 clock, operate 8051 clock at 125 MHz
++ * instead of the HW default of 62.5 MHz
++ * 2. write patch vector 0, to skip IB cal polling executed
++ * as part of the 0x80E0 ROM command
+ */
++ vsc8584_micro_deassert_reset(phydev, false);
+
+- /* Setup the Target ID */
+- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+- MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+-
+- /* Write the Least Significant Word (LSW) (17) */
+- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
+-
+- /* Write the Most Significant Word (MSW) (18) */
+- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
+-
+- /* Trigger CSR Action - Write into the CSR's */
+- phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+- MSCC_PHY_CSR_CNTL_19_CMD |
+- MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+- MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+-
+- /* Wait for register access */
+- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+- do {
+- usleep_range(500, 1000);
+- val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+- } while (time_before(jiffies, deadline) &&
+- !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+-
+- if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+- return -ETIMEDOUT;
+-
++ vsc8584_micro_assert_reset(phydev);
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+- MSCC_PHY_PAGE_STANDARD);
+-
+- return 0;
+-}
+-
+-static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
+- u32 op)
+-{
+- unsigned long deadline;
+- u32 val;
+- int ret;
+-
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, reg,
+- op | (1 << mcb));
++ MSCC_PHY_PAGE_EXTENDED_GPIO);
++ /* ROM address to trap, for patch vector 0 */
++ reg = MSCC_ROM_TRAP_SERDES_6G_CFG;
++ ret = phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), reg);
+ if (ret)
+- return -EINVAL;
+-
+- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+- do {
+- usleep_range(500, 1000);
+- val = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, reg);
+-
+- if (val == 0xffffffff)
+- return -EIO;
+-
+- } while (time_before(jiffies, deadline) && (val & op));
+-
+- if (val & op)
+- return -ETIMEDOUT;
+-
+- return 0;
+-}
+-
+-/* Trigger a read to the specified MCB */
+-static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+-{
+- return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
+-}
++ goto err;
++ /* RAM address to jump to, when patch vector 0 enabled */
++ reg = MSCC_RAM_TRAP_SERDES_6G_CFG;
++ ret = phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), reg);
++ if (ret)
++ goto err;
++ reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL);
++ reg |= PATCH_VEC_ZERO_EN; /* bit 8, enable patch vector 0 */
++ ret = phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg);
++ if (ret)
++ goto err;
+
+-/* Trigger a write to the specified MCB */
+-static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+-{
+- return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
++ /* Enable 8051 clock, don't set patch present
++ * yet, disable PRAM clock override
++ */
++ vsc8584_micro_deassert_reset(phydev, false);
++ return ret;
++ err:
++ /* restore 8051 and bail out with the error */
++ vsc8584_micro_deassert_reset(phydev, false);
++ return ret;
+ }
+
+ static int vsc8514_config_init(struct phy_device *phydev)
+ {
+ struct vsc8531_private *vsc8531 = phydev->priv;
+- unsigned long deadline;
+ int ret, i;
+- u16 val;
+- u32 reg;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+@@ -1763,123 +1970,14 @@ static int vsc8514_config_init(struct phy_device *phydev)
+ * do the correct init sequence for all PHYs that are package-critical
+ * in this pre-init function.
+ */
+- if (phy_package_init_once(phydev))
+- vsc8514_config_pre_init(phydev);
+-
+- ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+- MSCC_PHY_PAGE_EXTENDED_GPIO);
+- if (ret)
+- goto err;
+-
+- val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+-
+- val &= ~MAC_CFG_MASK;
+- val |= MAC_CFG_QSGMII;
+- ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+- if (ret)
+- goto err;
+-
+- ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+- MSCC_PHY_PAGE_STANDARD);
+- if (ret)
+- goto err;
+-
+- ret = vsc8584_cmd(phydev,
+- PROC_CMD_MCB_ACCESS_MAC_CONF |
+- PROC_CMD_RST_CONF_PORT |
+- PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
+- if (ret)
+- goto err;
+-
+- /* 6g mcb */
+- phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+- /* lcpll mcb */
+- phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+- /* pll5gcfg0 */
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+- PHY_S6G_PLL5G_CFG0, 0x7036f145);
+- if (ret)
+- goto err;
+-
+- phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+- /* pllcfg */
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+- PHY_S6G_PLL_CFG,
+- (3 << PHY_S6G_PLL_ENA_OFFS_POS) |
+- (120 << PHY_S6G_PLL_FSM_CTRL_DATA_POS)
+- | (0 << PHY_S6G_PLL_FSM_ENA_POS));
+- if (ret)
+- goto err;
+-
+- /* commoncfg */
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+- PHY_S6G_COMMON_CFG,
+- (0 << PHY_S6G_SYS_RST_POS) |
+- (0 << PHY_S6G_ENA_LANE_POS) |
+- (0 << PHY_S6G_ENA_LOOP_POS) |
+- (0 << PHY_S6G_QRATE_POS) |
+- (3 << PHY_S6G_IF_MODE_POS));
+- if (ret)
+- goto err;
+-
+- /* misccfg */
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+- PHY_S6G_MISC_CFG, 1);
+- if (ret)
+- goto err;
+-
+- /* gpcfg */
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+- PHY_S6G_GPC_CFG, 768);
+- if (ret)
+- goto err;
+-
+- phy_commit_mcb_s6g(phydev, PHY_S6G_DFT_CFG2, 0);
+-
+- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+- do {
+- usleep_range(500, 1000);
+- phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+- 0); /* read 6G MCB into CSRs */
+- reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+- PHY_S6G_PLL_STATUS);
+- if (reg == 0xffffffff) {
+- phy_unlock_mdio_bus(phydev);
+- return -EIO;
+- }
+-
+- } while (time_before(jiffies, deadline) && (reg & BIT(12)));
+-
+- if (reg & BIT(12)) {
+- phy_unlock_mdio_bus(phydev);
+- return -ETIMEDOUT;
+- }
+-
+- /* misccfg */
+- ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+- PHY_S6G_MISC_CFG, 0);
+- if (ret)
+- goto err;
+-
+- phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+-
+- deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+- do {
+- usleep_range(500, 1000);
+- phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+- 0); /* read 6G MCB into CSRs */
+- reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+- PHY_S6G_IB_STATUS0);
+- if (reg == 0xffffffff) {
+- phy_unlock_mdio_bus(phydev);
+- return -EIO;
+- }
+-
+- } while (time_before(jiffies, deadline) && !(reg & BIT(8)));
+-
+- if (!(reg & BIT(8))) {
+- phy_unlock_mdio_bus(phydev);
+- return -ETIMEDOUT;
++ if (phy_package_init_once(phydev)) {
++ ret = vsc8514_config_pre_init(phydev);
++ if (ret)
++ goto err;
++ ret = vsc8514_config_host_serdes(phydev);
++ if (ret)
++ goto err;
++ vsc85xx_coma_mode_release(phydev);
+ }
+
+ phy_unlock_mdio_bus(phydev);
+diff --git a/drivers/net/phy/mscc/mscc_serdes.c b/drivers/net/phy/mscc/mscc_serdes.c
+new file mode 100644
+index 0000000000000..b3e854f53d675
+--- /dev/null
++++ b/drivers/net/phy/mscc/mscc_serdes.c
+@@ -0,0 +1,650 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++/*
++ * Driver for Microsemi VSC85xx PHYs
++ *
++ * Author: Bjarni Jonasson <bjarni.jonasson@microchip.com>
++ * License: Dual MIT/GPL
++ * Copyright (c) 2021 Microsemi Corporation
++ */
++
++#include <linux/phy.h>
++#include "mscc_serdes.h"
++#include "mscc.h"
++
++static int pll5g_detune(struct phy_device *phydev)
++{
++ u32 rd_dat;
++ int ret;
++
++ rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
++ rd_dat &= ~PHY_S6G_PLL5G_CFG2_GAIN_MASK;
++ rd_dat |= PHY_S6G_PLL5G_CFG2_ENA_GAIN;
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_PLL5G_CFG2, rd_dat);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int pll5g_tune(struct phy_device *phydev)
++{
++ u32 rd_dat;
++ int ret;
++
++ rd_dat = vsc85xx_csr_read(phydev, MACRO_CTRL, PHY_S6G_PLL5G_CFG2);
++ rd_dat &= ~PHY_S6G_PLL5G_CFG2_ENA_GAIN;
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_PLL5G_CFG2, rd_dat);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_pll_cfg_wr(struct phy_device *phydev,
++ const u32 pll_ena_offs,
++ const u32 pll_fsm_ctrl_data,
++ const u32 pll_fsm_ena)
++{
++ int ret;
++
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_PLL_CFG,
++ (pll_fsm_ena << PHY_S6G_PLL_ENA_OFFS_POS) |
++ (pll_fsm_ctrl_data << PHY_S6G_PLL_FSM_CTRL_DATA_POS) |
++ (pll_ena_offs << PHY_S6G_PLL_FSM_ENA_POS));
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_common_cfg_wr(struct phy_device *phydev,
++ const u32 sys_rst,
++ const u32 ena_lane,
++ const u32 ena_loop,
++ const u32 qrate,
++ const u32 if_mode,
++ const u32 pwd_tx)
++{
++ /* ena_loop = 8 for eloop */
++ /* = 4 for floop */
++ /* = 2 for iloop */
++ /* = 1 for ploop */
++ /* qrate = 1 for SGMII, 0 for QSGMII */
++ /* if_mode = 1 for SGMII, 3 for QSGMII */
++
++ int ret;
++
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_COMMON_CFG,
++ (sys_rst << PHY_S6G_SYS_RST_POS) |
++ (ena_lane << PHY_S6G_ENA_LANE_POS) |
++ (ena_loop << PHY_S6G_ENA_LOOP_POS) |
++ (qrate << PHY_S6G_QRATE_POS) |
++ (if_mode << PHY_S6G_IF_MODE_POS));
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
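
[Editor's aside: the comment above lists the loopback-enable encodings as prose; the same mapping as an enum, purely for readability — the driver itself passes raw values.]

    enum sd6g_ena_loop {
            SD6G_ENA_ELOOP = 8,
            SD6G_ENA_FLOOP = 4,
            SD6G_ENA_ILOOP = 2,
            SD6G_ENA_PLOOP = 1,
    };
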
++static int vsc85xx_sd6g_des_cfg_wr(struct phy_device *phydev,
++ const u32 des_phy_ctrl,
++ const u32 des_mbtr_ctrl,
++ const u32 des_bw_hyst,
++ const u32 des_bw_ana,
++ const u32 des_cpmd_sel)
++{
++ u32 reg_val;
++ int ret;
++
++ /* configurable terms */
++ reg_val = (des_phy_ctrl << PHY_S6G_DES_PHY_CTRL_POS) |
++ (des_mbtr_ctrl << PHY_S6G_DES_MBTR_CTRL_POS) |
++ (des_cpmd_sel << PHY_S6G_DES_CPMD_SEL_POS) |
++ (des_bw_hyst << PHY_S6G_DES_BW_HYST_POS) |
++ (des_bw_ana << PHY_S6G_DES_BW_ANA_POS);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_DES_CFG,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg0_wr(struct phy_device *phydev,
++ const u32 ib_rtrm_adj,
++ const u32 ib_sig_det_clk_sel,
++ const u32 ib_reg_pat_sel_offset,
++ const u32 ib_cal_ena)
++{
++ u32 base_val;
++ u32 reg_val;
++ int ret;
++
++ /* constant terms */
++ base_val = 0x60a85837;
++ /* configurable terms */
++ reg_val = base_val | (ib_rtrm_adj << 25) |
++ (ib_sig_det_clk_sel << 16) |
++ (ib_reg_pat_sel_offset << 8) |
++ (ib_cal_ena << 3);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_IB_CFG0,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg1_wr(struct phy_device *phydev,
++ const u32 ib_tjtag,
++ const u32 ib_tsdet,
++ const u32 ib_scaly,
++ const u32 ib_frc_offset,
++ const u32 ib_filt_offset)
++{
++ u32 ib_filt_val;
++ u32 reg_val = 0;
++ int ret;
++
++ /* constant terms */
++ ib_filt_val = 0xe0;
++ /* configurable terms */
++ reg_val = (ib_tjtag << 17) + (ib_tsdet << 12) + (ib_scaly << 8) +
++ ib_filt_val + (ib_filt_offset << 4) + (ib_frc_offset << 0);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_IB_CFG1,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg2_wr(struct phy_device *phydev,
++ const u32 ib_tinfv,
++ const u32 ib_tcalv,
++ const u32 ib_ureg)
++{
++ u32 ib_cfg2_val;
++ u32 base_val;
++ int ret;
++
++ /* constant terms */
++ base_val = 0x0f878010;
++ /* configurable terms */
++ ib_cfg2_val = base_val | ((ib_tinfv) << 28) | ((ib_tcalv) << 5) |
++ (ib_ureg << 0);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_IB_CFG2,
++ ib_cfg2_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg3_wr(struct phy_device *phydev,
++ const u32 ib_ini_hp,
++ const u32 ib_ini_mid,
++ const u32 ib_ini_lp,
++ const u32 ib_ini_offset)
++{
++ u32 reg_val;
++ int ret;
++
++ reg_val = (ib_ini_hp << 24) + (ib_ini_mid << 16) +
++ (ib_ini_lp << 8) + (ib_ini_offset << 0);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_IB_CFG3,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_ib_cfg4_wr(struct phy_device *phydev,
++ const u32 ib_max_hp,
++ const u32 ib_max_mid,
++ const u32 ib_max_lp,
++ const u32 ib_max_offset)
++{
++ u32 reg_val;
++ int ret;
++
++ reg_val = (ib_max_hp << 24) + (ib_max_mid << 16) +
++ (ib_max_lp << 8) + (ib_max_offset << 0);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_IB_CFG4,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_misc_cfg_wr(struct phy_device *phydev,
++ const u32 lane_rst)
++{
++ int ret;
++
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_MISC_CFG,
++ lane_rst);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_gp_cfg_wr(struct phy_device *phydev, const u32 gp_cfg_val)
++{
++ int ret;
++
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_GP_CFG,
++ gp_cfg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_dft_cfg2_wr(struct phy_device *phydev,
++ const u32 rx_ji_ampl,
++ const u32 rx_step_freq,
++ const u32 rx_ji_ena,
++ const u32 rx_waveform_sel,
++ const u32 rx_freqoff_dir,
++ const u32 rx_freqoff_ena)
++{
++ u32 reg_val;
++ int ret;
++
++ /* configurable terms */
++ reg_val = (rx_ji_ampl << 8) | (rx_step_freq << 4) |
++ (rx_ji_ena << 3) | (rx_waveform_sel << 2) |
++ (rx_freqoff_dir << 1) | rx_freqoff_ena;
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_IB_DFT_CFG2,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++static int vsc85xx_sd6g_dft_cfg0_wr(struct phy_device *phydev,
++ const u32 prbs_sel,
++ const u32 test_mode,
++ const u32 rx_dft_ena)
++{
++ u32 reg_val;
++ int ret;
++
++ /* configurable terms */
++ reg_val = (prbs_sel << 20) | (test_mode << 16) | (rx_dft_ena << 2);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_DFT_CFG0,
++ reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++/* Access LCPLL Cfg_0 */
++static int vsc85xx_pll5g_cfg0_wr(struct phy_device *phydev,
++ const u32 selbgv820)
++{
++ u32 base_val;
++ u32 reg_val;
++ int ret;
++
++ /* constant terms */
++ base_val = 0x7036f145;
++ /* configurable terms */
++ reg_val = base_val | (selbgv820 << 23);
++ ret = vsc85xx_csr_write(phydev, MACRO_CTRL,
++ PHY_S6G_PLL5G_CFG0, reg_val);
++ if (ret)
++ dev_err(&phydev->mdio.dev, "%s: write error\n", __func__);
++ return ret;
++}
++
++int vsc85xx_sd6g_config_v2(struct phy_device *phydev)
++{
++ u32 ib_sig_det_clk_sel_cal = 0;
++ u32 ib_sig_det_clk_sel_mm = 7;
++ u32 pll_fsm_ctrl_data = 60;
++ unsigned long deadline;
++ u32 des_bw_ana_val = 3;
++ u32 ib_tsdet_cal = 16;
++ u32 ib_tsdet_mm = 5;
++ u32 ib_rtrm_adj;
++ u32 if_mode = 1;
++ u32 gp_iter = 5;
++ u32 val32 = 0;
++ u32 qrate = 1;
++ u32 iter;
++ int val = 0;
++ int ret;
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
++
++ /* Detune/Unlock LCPLL */
++ ret = pll5g_detune(phydev);
++ if (ret)
++ return ret;
++
++ /* 0. Reset RCPLL */
++ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 0, 0, 0, qrate, if_mode, 0);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
++ if (ret)
++ return ret;
++
++ /* 1. Configure sd6g for SGMII prior to sd6g_IB_CAL */
++ ib_rtrm_adj = 13;
++ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg2_wr(phydev, 3, 13, 5);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg3_wr(phydev, 0, 31, 1, 31);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg4_wr(phydev, 63, 63, 2, 63);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 2. Start rcpll_fsm */
++ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++ do {
++ usleep_range(500, 1000);
++ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
++ PHY_S6G_PLL_STATUS);
++ /* wait for bit 12 to clear */
++ } while (time_before(jiffies, deadline) && (val32 & BIT(12)));
++
++ if (val32 & BIT(12))
++ return -ETIMEDOUT;
++
++ /* 4. Release digital reset and disable transmitter */
++ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 5. Apply a frequency offset on RX-side (using internal FoJi logic) */
++ ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 768);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_dft_cfg2_wr(phydev, 0, 2, 0, 0, 0, 1);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_dft_cfg0_wr(phydev, 0, 0, 1);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 2);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 6. Prepare required settings for IBCAL */
++ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 1, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_cal, 0, 0);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 7. Start IB_CAL */
++ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj,
++ ib_sig_det_clk_sel_cal, 0, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ /* 11 cycles (for ViperA) or 5 cycles (for ViperB & Elise) w/ SW clock */
++ for (iter = 0; iter < gp_iter; iter++) {
++ /* set gp(0) */
++ ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 769);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ /* clear gp(0) */
++ ret = vsc85xx_sd6g_gp_cfg_wr(phydev, 768);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ }
++
++ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 1, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_cal, 15, 0, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 8. Wait for IB cal to complete */
++ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++ do {
++ usleep_range(500, 1000);
++ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
++ PHY_S6G_IB_STATUS0);
++ /* wait for bit 8 to set */
++ } while (time_before(jiffies, deadline) && (~val32 & BIT(8)));
++
++ if (~val32 & BIT(8))
++ return -ETIMEDOUT;
++
++ /* 9. Restore cfg values for mission mode */
++ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 1);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 10. Re-enable transmitter */
++ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 11. Disable frequency offset generation (using internal FoJi logic) */
++ ret = vsc85xx_sd6g_dft_cfg2_wr(phydev, 0, 0, 0, 0, 0, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_dft_cfg0_wr(phydev, 0, 0, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* Tune/Re-lock LCPLL */
++ ret = pll5g_tune(phydev);
++ if (ret)
++ return ret;
++
++ /* 12. Configure for Final Configuration and Settings */
++ /* a. Reset RCPLL */
++ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 0, 1, 0, qrate, if_mode, 0);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* b. Configure sd6g for desired operating mode */
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_GPIO);
++ ret = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
++ if ((ret & MAC_CFG_MASK) == MAC_CFG_QSGMII) {
++ /* QSGMII */
++ pll_fsm_ctrl_data = 120;
++ qrate = 0;
++ if_mode = 3;
++ des_bw_ana_val = 5;
++ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
++ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC;
++
++ ret = vsc8584_cmd(phydev, val);
++ if (ret) {
++ dev_err(&phydev->mdio.dev, "%s: QSGMII error: %d\n",
++ __func__, ret);
++ return ret;
++ }
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
++ } else if ((ret & MAC_CFG_MASK) == MAC_CFG_SGMII) {
++ /* SGMII */
++ pll_fsm_ctrl_data = 60;
++ qrate = 1;
++ if_mode = 1;
++ des_bw_ana_val = 3;
++
++ val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
++ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_SGMII_MAC;
++
++ ret = vsc8584_cmd(phydev, val);
++ if (ret) {
++ dev_err(&phydev->mdio.dev, "%s: SGMII error: %d\n",
++ __func__, ret);
++ return ret;
++ }
++
++ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
++ } else {
++ dev_err(&phydev->mdio.dev, "%s: invalid mac_if: %x\n",
++ __func__, ret);
++ }
++
++ ret = phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
++ if (ret)
++ return ret;
++ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_pll5g_cfg0_wr(phydev, 4);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_des_cfg_wr(phydev, 6, 2, 5, des_bw_ana_val, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg0_wr(phydev, ib_rtrm_adj, ib_sig_det_clk_sel_mm, 0, 1);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg1_wr(phydev, 8, ib_tsdet_mm, 15, 0, 1);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_common_cfg_wr(phydev, 1, 1, 0, qrate, if_mode, 0);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg2_wr(phydev, 3, 13, 5);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg3_wr(phydev, 0, 31, 1, 31);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_ib_cfg4_wr(phydev, 63, 63, 2, 63);
++ if (ret)
++ return ret;
++ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 13. Start rcpll_fsm */
++ ret = vsc85xx_sd6g_pll_cfg_wr(phydev, 3, pll_fsm_ctrl_data, 1);
++ if (ret)
++ return ret;
++ ret = phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++
++ /* 14. Wait for PLL cal to complete */
++ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++ do {
++ usleep_range(500, 1000);
++ ret = phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++ if (ret)
++ return ret;
++ val32 = vsc85xx_csr_read(phydev, MACRO_CTRL,
++ PHY_S6G_PLL_STATUS);
++ /* wait for bit 12 to clear */
++ } while (time_before(jiffies, deadline) && (val32 & BIT(12)));
++
++ if (val32 & BIT(12))
++ return -ETIMEDOUT;
++
++ /* release lane reset */
++ ret = vsc85xx_sd6g_misc_cfg_wr(phydev, 0);
++ if (ret)
++ return ret;
++
++ return phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
++}
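
[Editor's aside: step 12b of vsc85xx_sd6g_config_v2() above re-derives four mode-dependent parameters from MAC_CFG; here they are collected into a table for reference. The values are copied from the function; the struct form is illustrative only, not the driver's layout.]

    struct sd6g_mode_params {
            u32 pll_fsm_ctrl_data;
            u32 qrate;      /* 1 = SGMII, 0 = QSGMII */
            u32 if_mode;    /* 1 = SGMII, 3 = QSGMII */
            u32 des_bw_ana;
    };

    static const struct sd6g_mode_params sd6g_sgmii  = {  60, 1, 1, 3 };
    static const struct sd6g_mode_params sd6g_qsgmii = { 120, 0, 3, 5 };
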
+diff --git a/drivers/net/phy/mscc/mscc_serdes.h b/drivers/net/phy/mscc/mscc_serdes.h
+new file mode 100644
+index 0000000000000..2a6371322af91
+--- /dev/null
++++ b/drivers/net/phy/mscc/mscc_serdes.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
++/*
++ * Driver for Microsemi VSC85xx PHYs
++ *
++ * Copyright (c) 2021 Microsemi Corporation
++ */
++
++#ifndef _MSCC_SERDES_PHY_H_
++#define _MSCC_SERDES_PHY_H_
++
++#define PHY_S6G_PLL5G_CFG2_GAIN_MASK GENMASK(9, 5)
++#define PHY_S6G_PLL5G_CFG2_ENA_GAIN 1
++
++#define PHY_S6G_DES_PHY_CTRL_POS 13
++#define PHY_S6G_DES_MBTR_CTRL_POS 10
++#define PHY_S6G_DES_CPMD_SEL_POS 8
++#define PHY_S6G_DES_BW_HYST_POS 5
++#define PHY_S6G_DES_BW_ANA_POS 1
++#define PHY_S6G_DES_CFG 0x21
++#define PHY_S6G_IB_CFG0 0x22
++#define PHY_S6G_IB_CFG1 0x23
++#define PHY_S6G_IB_CFG2 0x24
++#define PHY_S6G_IB_CFG3 0x25
++#define PHY_S6G_IB_CFG4 0x26
++#define PHY_S6G_GP_CFG 0x2E
++#define PHY_S6G_DFT_CFG0 0x35
++#define PHY_S6G_IB_DFT_CFG2 0x37
++
++int vsc85xx_sd6g_config_v2(struct phy_device *phydev);
++
++#endif /* _MSCC_SERDES_PHY_H_ */
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 80c2e646c0934..71169e7d6177d 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -300,50 +300,22 @@ static int mdio_bus_phy_resume(struct device *dev)
+
+ phydev->suspended_by_mdio_bus = 0;
+
+- ret = phy_resume(phydev);
++ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ return ret;
+
+-no_resume:
+- if (phydev->attached_dev && phydev->adjust_link)
+- phy_start_machine(phydev);
+-
+- return 0;
+-}
+-
+-static int mdio_bus_phy_restore(struct device *dev)
+-{
+- struct phy_device *phydev = to_phy_device(dev);
+- struct net_device *netdev = phydev->attached_dev;
+- int ret;
+-
+- if (!netdev)
+- return 0;
+-
+- ret = phy_init_hw(phydev);
++ ret = phy_resume(phydev);
+ if (ret < 0)
+ return ret;
+-
++no_resume:
+ if (phydev->attached_dev && phydev->adjust_link)
+ phy_start_machine(phydev);
+
+ return 0;
+ }
+
+-static const struct dev_pm_ops mdio_bus_phy_pm_ops = {
+- .suspend = mdio_bus_phy_suspend,
+- .resume = mdio_bus_phy_resume,
+- .freeze = mdio_bus_phy_suspend,
+- .thaw = mdio_bus_phy_resume,
+- .restore = mdio_bus_phy_restore,
+-};
+-
+-#define MDIO_BUS_PHY_PM_OPS (&mdio_bus_phy_pm_ops)
+-
+-#else
+-
+-#define MDIO_BUS_PHY_PM_OPS NULL
+-
++static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
++ mdio_bus_phy_resume);
+ #endif /* CONFIG_PM */
+
+ /**
+@@ -554,7 +526,7 @@ static const struct device_type mdio_bus_phy_type = {
+ .name = "PHY",
+ .groups = phy_dev_groups,
+ .release = phy_device_release,
+- .pm = MDIO_BUS_PHY_PM_OPS,
++ .pm = pm_ptr(&mdio_bus_phy_pm_ops),
+ };
+
+ static int phy_request_driver_module(struct phy_device *dev, u32 phy_id)
+@@ -1143,10 +1115,19 @@ int phy_init_hw(struct phy_device *phydev)
+ if (ret < 0)
+ return ret;
+
+- if (phydev->drv->config_init)
++ if (phydev->drv->config_init) {
+ ret = phydev->drv->config_init(phydev);
++ if (ret < 0)
++ return ret;
++ }
+
+- return ret;
++ if (phydev->drv->config_intr) {
++ ret = phydev->drv->config_intr(phydev);
++ if (ret < 0)
++ return ret;
++ }
++
++ return 0;
+ }
+ EXPORT_SYMBOL(phy_init_hw);
+
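[Editor's aside: the phy_device.c hunks above fold the hand-rolled dev_pm_ops — which pointed freeze/thaw/restore at the same suspend/resume pair — into SIMPLE_DEV_PM_OPS(), with pm_ptr() replacing the old MDIO_BUS_PHY_PM_OPS NULL fallback. Roughly, and simplified from include/linux/pm.h (example_suspend/example_resume are hypothetical names):]

    /* Approximate expansion, for orientation only; the real macros go
     * through SET_SYSTEM_SLEEP_PM_OPS() and friends.
     */
    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
    /* ~ const struct dev_pm_ops example_pm_ops = {
     *         .suspend  = example_suspend,  .resume  = example_resume,
     *         .freeze   = example_suspend,  .thaw    = example_resume,
     *         .poweroff = example_suspend,  .restore = example_resume,
     *   };
     * pm_ptr(&example_pm_ops) evaluates to &example_pm_ops when CONFIG_PM
     * is enabled and to NULL otherwise.
     */
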
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 91d74c1a920ab..f2b5e467a8001 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -336,19 +336,11 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
+ size_t len)
+ {
+ struct i2c_msg msgs[2];
+- size_t block_size;
++ u8 bus_addr = a2 ? 0x51 : 0x50;
++ size_t block_size = sfp->i2c_block_size;
+ size_t this_len;
+- u8 bus_addr;
+ int ret;
+
+- if (a2) {
+- block_size = 16;
+- bus_addr = 0x51;
+- } else {
+- block_size = sfp->i2c_block_size;
+- bus_addr = 0x50;
+- }
+-
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+@@ -1282,6 +1274,20 @@ static void sfp_hwmon_probe(struct work_struct *work)
+ struct sfp *sfp = container_of(work, struct sfp, hwmon_probe.work);
+ int err, i;
+
++ /* The hwmon interface needs to access 16bit registers atomically to
++ * guarantee coherency of the diagnostic monitoring data. If coherency
++ * cannot be guaranteed because the EEPROM is broken in such a way that
++ * it does not support atomic 16bit read operations, then we have to
++ * skip registration of the hwmon device.
++ */
++ if (sfp->i2c_block_size < 2) {
++ dev_info(sfp->dev,
++ "skipping hwmon device registration due to broken EEPROM\n");
++ dev_info(sfp->dev,
++ "diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
++ return;
++ }
++
+ err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
+ if (err < 0) {
+ if (sfp->hwmon_tries--) {
+@@ -1642,26 +1648,30 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
+ return 0;
+ }
+
+-/* Some modules (Nokia 3FE46541AA) lock up if byte 0x51 is read as a
+- * single read. Switch back to reading 16 byte blocks unless we have
+- * a CarlitoxxPro module (rebranded VSOL V2801F). Even more annoyingly,
+- * some VSOL V2801F have the vendor name changed to OEM.
++/* GPON modules based on Realtek RTL8672 and RTL9601C chips (e.g. V-SOL
++ * V2801F, CarlitoxxPro CPGOS03-0490, Ubiquiti U-Fiber Instant, ...) do
++ * not support multibyte reads from the EEPROM. Each multi-byte read
++ * operation returns just one byte of EEPROM followed by zeros. There is
++ * no way to identify which modules are using Realtek RTL8672 and RTL9601C
++ * chips. Moreover, every OEM of the V-SOL V2801F module puts its own
++ * vendor name and vendor id into the EEPROM, so there is no way even to
++ * detect whether a module is a V-SOL V2801F. Therefore check for those
++ * zeros in the read data and, based on that check, switch to reading
++ * the EEPROM one byte at a time.
+ */
+-static int sfp_quirk_i2c_block_size(const struct sfp_eeprom_base *base)
++static bool sfp_id_needs_byte_io(struct sfp *sfp, void *buf, size_t len)
+ {
+- if (!memcmp(base->vendor_name, "VSOL ", 16))
+- return 1;
+- if (!memcmp(base->vendor_name, "OEM ", 16) &&
+- !memcmp(base->vendor_pn, "V2801F ", 16))
+- return 1;
++ size_t i, block_size = sfp->i2c_block_size;
+
+- /* Some modules can't cope with long reads */
+- return 16;
+-}
++ /* Already using byte IO */
++ if (block_size == 1)
++ return false;
+
+-static void sfp_quirks_base(struct sfp *sfp, const struct sfp_eeprom_base *base)
+-{
+- sfp->i2c_block_size = sfp_quirk_i2c_block_size(base);
++ for (i = 1; i < len; i += block_size) {
++ if (memchr_inv(buf + i, '\0', min(block_size - 1, len - i)))
++ return false;
++ }
++ return true;
+ }
+
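[Editor's aside: sfp_id_needs_byte_io() above keys off the zero padding that the RTL8672/RTL9601C emulated EEPROM appends after the first byte of every multi-byte read. A self-contained userspace re-implementation of the same check, with fabricated sample data for illustration only:]

    #include <stdio.h>
    #include <stddef.h>

    /* Mirror of the kernel check: with block size bs, only byte 0 of each
     * block is real on a broken emulated EEPROM; bytes 1..bs-1 read as 0.
     */
    static int needs_byte_io(const unsigned char *buf, size_t len, size_t bs)
    {
            size_t i, j, n;

            if (bs == 1)                    /* already using byte IO */
                    return 0;
            for (i = 1; i < len; i += bs) {
                    n = (bs - 1 < len - i) ? bs - 1 : len - i;
                    for (j = 0; j < n; j++)
                            if (buf[i + j] != 0)
                                    return 0;  /* real data: reads are fine */
            }
            return 1;                          /* zeros only: switch to byte IO */
    }

    int main(void)
    {
            unsigned char broken[32] = { 'A' }; /* 'A', zeros, 'B', zeros */

            broken[16] = 'B';
            printf("needs byte IO: %d\n", needs_byte_io(broken, 32, 16));
            return 0;
    }
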
+ static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
+@@ -1705,11 +1715,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ u8 check;
+ int ret;
+
+- /* Some modules (CarlitoxxPro CPGOS03-0490) do not support multibyte
+- * reads from the EEPROM, so start by reading the base identifying
+- * information one byte at a time.
++ /* Some SFP modules and also some Linux I2C drivers do not like reads
++ * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
++ * a time.
+ */
+- sfp->i2c_block_size = 1;
++ sfp->i2c_block_size = 16;
+
+ ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
+ if (ret < 0) {
+@@ -1723,6 +1733,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ return -EAGAIN;
+ }
+
++ /* Some SFP modules (e.g. Nokia 3FE46541AA) lock up if reads from
++ * address 0x51 are done just one byte at a time. Also, SFF-8472
++ * requires that the EEPROM support atomic 16bit read operations for
++ * diagnostic fields, so do not switch to one-byte-at-a-time reading
++ * unless it is really required and we have no other option.
++ */
++ if (sfp_id_needs_byte_io(sfp, &id.base, sizeof(id.base))) {
++ dev_info(sfp->dev,
++ "Detected broken RTL8672/RTL9601C emulated EEPROM\n");
++ dev_info(sfp->dev,
++ "Switching to reading EEPROM to one byte at a time\n");
++ sfp->i2c_block_size = 1;
++
++ ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
++ if (ret < 0) {
++ if (report)
++ dev_err(sfp->dev, "failed to read EEPROM: %d\n",
++ ret);
++ return -EAGAIN;
++ }
++
++ if (ret != sizeof(id.base)) {
++ dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
++ return -EAGAIN;
++ }
++ }
++
+ /* Cotsworks do not seem to update the checksums when they
+ * do the final programming with the final module part number,
+ * serial number and date code.
+@@ -1757,9 +1794,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ }
+ }
+
+- /* Apply any early module-specific quirks */
+- sfp_quirks_base(sfp, &id.base);
+-
+ ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext));
+ if (ret < 0) {
+ if (report)
+diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
+index 29a0917a81e60..f14a9d190de91 100644
+--- a/drivers/net/ppp/ppp_async.c
++++ b/drivers/net/ppp/ppp_async.c
+@@ -259,7 +259,8 @@ static int ppp_asynctty_hangup(struct tty_struct *tty)
+ */
+ static ssize_t
+ ppp_asynctty_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t count)
++ unsigned char *buf, size_t count,
++ void **cookie, unsigned long offset)
+ {
+ return -EAGAIN;
+ }
+diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
+index 0f338752c38b9..f774b7e52da44 100644
+--- a/drivers/net/ppp/ppp_synctty.c
++++ b/drivers/net/ppp/ppp_synctty.c
+@@ -257,7 +257,8 @@ static int ppp_sync_hangup(struct tty_struct *tty)
+ */
+ static ssize_t
+ ppp_sync_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t count)
++ unsigned char *buf, size_t count,
++ void **cookie, unsigned long offset)
+ {
+ return -EAGAIN;
+ }
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index a8ad710629e69..0842371eca3d6 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -4725,7 +4725,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan, *next;
+ struct net_device *dev, *aux;
+- unsigned int h;
+
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &vxlan_link_ops)
+@@ -4739,14 +4738,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+ unregister_netdevice_queue(vxlan->dev, head);
+ }
+
+- for (h = 0; h < PORT_HASH_SIZE; ++h)
+- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
+ }
+
+ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+ {
+ struct net *net;
+ LIST_HEAD(list);
++ unsigned int h;
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list) {
+@@ -4759,6 +4757,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
++
++ list_for_each_entry(net, net_list, exit_list) {
++ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
++
++ for (h = 0; h < PORT_HASH_SIZE; ++h)
++ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
++ }
+ }
+
+ static struct pernet_operations vxlan_net_ops = {
+diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
+index a3ed49cd95c31..b4d84c881c7d0 100644
+--- a/drivers/net/wireguard/device.c
++++ b/drivers/net/wireguard/device.c
+@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
+ dev->name, &ipv6_hdr(skb)->daddr);
+- goto err;
++ goto err_icmp;
+ }
+
+ family = READ_ONCE(peer->endpoint.addr.sa_family);
+@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ err_peer:
+ wg_peer_put(peer);
+-err:
+- ++dev->stats.tx_errors;
++err_icmp:
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
++err:
++ ++dev->stats.tx_errors;
+ kfree_skb(skb);
+ return ret;
+ }
+@@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev)
+ destroy_workqueue(wg->handshake_receive_wq);
+ destroy_workqueue(wg->handshake_send_wq);
+ destroy_workqueue(wg->packet_crypt_wq);
+- wg_packet_queue_free(&wg->decrypt_queue, true);
+- wg_packet_queue_free(&wg->encrypt_queue, true);
++ wg_packet_queue_free(&wg->decrypt_queue);
++ wg_packet_queue_free(&wg->encrypt_queue);
+ rcu_barrier(); /* Wait for all the peers to be actually freed. */
+ wg_ratelimiter_uninit();
+ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
+@@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
+ goto err_destroy_handshake_send;
+
+ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
+- true, MAX_QUEUED_PACKETS);
++ MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_destroy_packet_crypt;
+
+ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
+- true, MAX_QUEUED_PACKETS);
++ MAX_QUEUED_PACKETS);
+ if (ret < 0)
+ goto err_free_encrypt_queue;
+
+@@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
+ err_uninit_ratelimiter:
+ wg_ratelimiter_uninit();
+ err_free_decrypt_queue:
+- wg_packet_queue_free(&wg->decrypt_queue, true);
++ wg_packet_queue_free(&wg->decrypt_queue);
+ err_free_encrypt_queue:
+- wg_packet_queue_free(&wg->encrypt_queue, true);
++ wg_packet_queue_free(&wg->encrypt_queue);
+ err_destroy_packet_crypt:
+ destroy_workqueue(wg->packet_crypt_wq);
+ err_destroy_handshake_send:
+diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
+index 4d0144e169478..854bc3d97150e 100644
+--- a/drivers/net/wireguard/device.h
++++ b/drivers/net/wireguard/device.h
+@@ -27,13 +27,14 @@ struct multicore_worker {
+
+ struct crypt_queue {
+ struct ptr_ring ring;
+- union {
+- struct {
+- struct multicore_worker __percpu *worker;
+- int last_cpu;
+- };
+- struct work_struct work;
+- };
++ struct multicore_worker __percpu *worker;
++ int last_cpu;
++};
++
++struct prev_queue {
++ struct sk_buff *head, *tail, *peeked;
++ struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff.
++ atomic_t count;
+ };
+
+ struct wg_device {
+diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
+index b3b6370e6b959..cd5cb0292cb67 100644
+--- a/drivers/net/wireguard/peer.c
++++ b/drivers/net/wireguard/peer.c
+@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (unlikely(!peer))
+ return ERR_PTR(ret);
+- peer->device = wg;
++ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
++ goto err;
+
++ peer->device = wg;
+ wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+ public_key, preshared_key, peer);
+- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+- goto err_1;
+- if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
+- MAX_QUEUED_PACKETS))
+- goto err_2;
+- if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
+- MAX_QUEUED_PACKETS))
+- goto err_3;
+-
+ peer->internal_id = atomic64_inc_return(&peer_counter);
+ peer->serial_work_cpu = nr_cpumask_bits;
+ wg_cookie_init(&peer->latest_cookie);
+ wg_timers_init(peer);
+ wg_cookie_checker_precompute_peer_keys(peer);
+ spin_lock_init(&peer->keypairs.keypair_update_lock);
+- INIT_WORK(&peer->transmit_handshake_work,
+- wg_packet_handshake_send_worker);
++ INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
++ INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
++ wg_prev_queue_init(&peer->tx_queue);
++ wg_prev_queue_init(&peer->rx_queue);
+ rwlock_init(&peer->endpoint_lock);
+ kref_init(&peer->refcount);
+ skb_queue_head_init(&peer->staged_packet_queue);
+@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
+ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
+ return peer;
+
+-err_3:
+- wg_packet_queue_free(&peer->tx_queue, false);
+-err_2:
+- dst_cache_destroy(&peer->endpoint_cache);
+-err_1:
++err:
+ kfree(peer);
+ return ERR_PTR(ret);
+ }
+@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu)
+ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
+
+ dst_cache_destroy(&peer->endpoint_cache);
+- wg_packet_queue_free(&peer->rx_queue, false);
+- wg_packet_queue_free(&peer->tx_queue, false);
++ WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
+
+ /* The final zeroing takes care of clearing any remaining handshake key
+ * material and other potentially sensitive information.
+diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
+index 23af409229972..0809cda08bfa4 100644
+--- a/drivers/net/wireguard/peer.h
++++ b/drivers/net/wireguard/peer.h
+@@ -36,7 +36,7 @@ struct endpoint {
+
+ struct wg_peer {
+ struct wg_device *device;
+- struct crypt_queue tx_queue, rx_queue;
++ struct prev_queue tx_queue, rx_queue;
+ struct sk_buff_head staged_packet_queue;
+ int serial_work_cpu;
+ struct noise_keypairs keypairs;
+@@ -45,7 +45,7 @@ struct wg_peer {
+ rwlock_t endpoint_lock;
+ struct noise_handshake handshake;
+ atomic64_t last_sent_handshake;
+- struct work_struct transmit_handshake_work, clear_peer_work;
++ struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work;
+ struct cookie latest_cookie;
+ struct hlist_node pubkey_hash;
+ u64 rx_bytes, tx_bytes;
+diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
+index 71b8e80b58e12..48e7b982a3073 100644
+--- a/drivers/net/wireguard/queueing.c
++++ b/drivers/net/wireguard/queueing.c
+@@ -9,8 +9,7 @@ struct multicore_worker __percpu *
+ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+ {
+ int cpu;
+- struct multicore_worker __percpu *worker =
+- alloc_percpu(struct multicore_worker);
++ struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
+
+ if (!worker)
+ return NULL;
+@@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
+ }
+
+ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+- bool multicore, unsigned int len)
++ unsigned int len)
+ {
+ int ret;
+
+@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
+ if (ret)
+ return ret;
+- if (function) {
+- if (multicore) {
+- queue->worker = wg_packet_percpu_multicore_worker_alloc(
+- function, queue);
+- if (!queue->worker) {
+- ptr_ring_cleanup(&queue->ring, NULL);
+- return -ENOMEM;
+- }
+- } else {
+- INIT_WORK(&queue->work, function);
+- }
++ queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
++ if (!queue->worker) {
++ ptr_ring_cleanup(&queue->ring, NULL);
++ return -ENOMEM;
+ }
+ return 0;
+ }
+
+-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
++void wg_packet_queue_free(struct crypt_queue *queue)
+ {
+- if (multicore)
+- free_percpu(queue->worker);
++ free_percpu(queue->worker);
+ WARN_ON(!__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, NULL);
+ }
++
++#define NEXT(skb) ((skb)->prev)
++#define STUB(queue) ((struct sk_buff *)&queue->empty)
++
++void wg_prev_queue_init(struct prev_queue *queue)
++{
++ NEXT(STUB(queue)) = NULL;
++ queue->head = queue->tail = STUB(queue);
++ queue->peeked = NULL;
++ atomic_set(&queue->count, 0);
++ BUILD_BUG_ON(
++ offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
++ offsetof(struct prev_queue, empty) ||
++ offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
++ offsetof(struct prev_queue, empty));
++}
++
++static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
++{
++ WRITE_ONCE(NEXT(skb), NULL);
++ WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
++}
++
++bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
++{
++ if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
++ return false;
++ __wg_prev_queue_enqueue(queue, skb);
++ return true;
++}
++
++struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
++{
++ struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
++
++ if (tail == STUB(queue)) {
++ if (!next)
++ return NULL;
++ queue->tail = next;
++ tail = next;
++ next = smp_load_acquire(&NEXT(next));
++ }
++ if (next) {
++ queue->tail = next;
++ atomic_dec(&queue->count);
++ return tail;
++ }
++ if (tail != READ_ONCE(queue->head))
++ return NULL;
++ __wg_prev_queue_enqueue(queue, STUB(queue));
++ next = smp_load_acquire(&NEXT(tail));
++ if (next) {
++ queue->tail = next;
++ atomic_dec(&queue->count);
++ return tail;
++ }
++ return NULL;
++}
++
++#undef NEXT
++#undef STUB
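
[Editor's aside: the queue above is an intrusive MPSC list with a stub node, chained through skb->prev (the NEXT() macro) so no per-packet allocation is needed. The intended usage contract, sketched with hypothetical ready()/process() stand-ins — compare wg_packet_rx_poll() later in this patch:]

    /* Sketch of the single-consumer side; wg_prev_queue_peek() and
     * wg_prev_queue_drop_peeked() are declared in queueing.h below.
     */
    static void drain_queue(struct prev_queue *q)
    {
            struct sk_buff *skb;

            while ((skb = wg_prev_queue_peek(q)) != NULL) {
                    if (!ready(skb))   /* e.g. still PACKET_STATE_UNCRYPTED */
                            break;     /* stays peeked; retried on next pass */
                    wg_prev_queue_drop_peeked(q);
                    process(skb);
            }
    }

    /* Any number of producers may enqueue concurrently:
     *     if (!wg_prev_queue_enqueue(q, skb))
     *             drop(skb);    // queue full (MAX_QUEUED_PACKETS)
     */
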
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index dfb674e030764..4ef2944a68bc9 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -17,12 +17,13 @@ struct wg_device;
+ struct wg_peer;
+ struct multicore_worker;
+ struct crypt_queue;
++struct prev_queue;
+ struct sk_buff;
+
+ /* queueing.c APIs: */
+ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
+- bool multicore, unsigned int len);
+-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
++ unsigned int len);
++void wg_packet_queue_free(struct crypt_queue *queue);
+ struct multicore_worker __percpu *
+ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
+
+@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online(int *next)
+ return cpu;
+ }
+
++void wg_prev_queue_init(struct prev_queue *queue);
++
++/* Multi producer */
++bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
++
++/* Single consumer */
++struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
++
++/* Single consumer */
++static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
++{
++ if (queue->peeked)
++ return queue->peeked;
++ queue->peeked = wg_prev_queue_dequeue(queue);
++ return queue->peeked;
++}
++
++/* Single consumer */
++static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
++{
++ queue->peeked = NULL;
++}
++
+ static inline int wg_queue_enqueue_per_device_and_peer(
+- struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
++ struct crypt_queue *device_queue, struct prev_queue *peer_queue,
+ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+ {
+ int cpu;
+@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_device_and_peer(
+ /* We first queue this up for the peer ingestion, but the consumer
+ * will wait for the state to change to CRYPTED or DEAD before.
+ */
+- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
++ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
+ return -ENOSPC;
++
+ /* Then we queue it up in the device queue, which consumes the
+ * packet as soon as it can.
+ */
+@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
+ return 0;
+ }
+
+-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+- struct sk_buff *skb,
+- enum packet_state state)
++static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
+ {
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
+@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
+
+ atomic_set_release(&PACKET_CB(skb)->state, state);
+- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
+- peer->internal_id),
+- peer->device->packet_crypt_wq, &queue->work);
++ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
++ peer->device->packet_crypt_wq, &peer->transmit_packet_work);
+ wg_peer_put(peer);
+ }
+
+-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
+- enum packet_state state)
++static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
+ {
+ /* We take a reference, because as soon as we call atomic_set, the
+ * peer can be freed from below us.
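
The new `wg_prev_queue_peek()`/`wg_prev_queue_drop_peeked()` helpers let one serialized worker consume packets strictly in submission order while parallel crypto workers publish completion out of order: the worker stops at the first entry whose state is still `PACKET_STATE_UNCRYPTED`. The handoff is a release store paired with an acquire load; in miniature, with invented names:

```c
/* Sketch of the state handoff used by the tx/rx workers below: the
 * crypto worker publishes its result with a release store, the
 * in-order consumer peeks with an acquire load. Names are invented. */
#include <stdatomic.h>

enum pkt_state { PKT_UNCRYPTED, PKT_CRYPTED, PKT_DEAD };

struct pkt {
	_Atomic enum pkt_state state;
	/* payload ... */
};

/* Crypto worker, any CPU, completes in any order: */
static void publish(struct pkt *p, enum pkt_state s)
{
	atomic_store_explicit(&p->state, s, memory_order_release);
}

/* Serialized consumer: processes the queue head only once its crypto
 * has finished, which preserves transmit ordering. */
static int try_consume(struct pkt *p)
{
	enum pkt_state s = atomic_load_explicit(&p->state, memory_order_acquire);

	if (s == PKT_UNCRYPTED)
		return 0;	/* head not ready yet; come back later */
	/* ... transmit (PKT_CRYPTED) or drop (PKT_DEAD) ... */
	return 1;
}
```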
+diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
+index 2c9551ea6dc73..7dc84bcca2613 100644
+--- a/drivers/net/wireguard/receive.c
++++ b/drivers/net/wireguard/receive.c
+@@ -444,7 +444,6 @@ packet_processed:
+ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+ {
+ struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
+- struct crypt_queue *queue = &peer->rx_queue;
+ struct noise_keypair *keypair;
+ struct endpoint endpoint;
+ enum packet_state state;
+@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
+ if (unlikely(budget <= 0))
+ return 0;
+
+- while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
++ while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+- __ptr_ring_discard_one(&queue->ring);
+- peer = PACKET_PEER(skb);
++ wg_prev_queue_drop_peeked(&peer->rx_queue);
+ keypair = PACKET_CB(skb)->keypair;
+ free = true;
+
+@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct work_struct *work)
+ enum packet_state state =
+ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
+- wg_queue_enqueue_per_peer_napi(skb, state);
++ wg_queue_enqueue_per_peer_rx(skb, state);
+ if (need_resched())
+ cond_resched();
+ }
+@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+- ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
+- &peer->rx_queue, skb,
+- wg->packet_crypt_wq,
+- &wg->decrypt_queue.last_cpu);
++ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
++ wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+- wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
++ wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
+ if (likely(!ret || ret == -EPIPE)) {
+ rcu_read_unlock_bh();
+ return;
+diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
+index f74b9341ab0fe..5368f7c35b4bf 100644
+--- a/drivers/net/wireguard/send.c
++++ b/drivers/net/wireguard/send.c
+@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
+ wg_packet_send_staged_packets(peer);
+ }
+
+-static void wg_packet_create_data_done(struct sk_buff *first,
+- struct wg_peer *peer)
++static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
+ {
+ struct sk_buff *skb, *next;
+ bool is_keepalive, data_sent = false;
+@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(struct sk_buff *first,
+
+ void wg_packet_tx_worker(struct work_struct *work)
+ {
+- struct crypt_queue *queue = container_of(work, struct crypt_queue,
+- work);
++ struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
+ struct noise_keypair *keypair;
+ enum packet_state state;
+ struct sk_buff *first;
+- struct wg_peer *peer;
+
+- while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
++ while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
+ (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
+ PACKET_STATE_UNCRYPTED) {
+- __ptr_ring_discard_one(&queue->ring);
+- peer = PACKET_PEER(first);
++ wg_prev_queue_drop_peeked(&peer->tx_queue);
+ keypair = PACKET_CB(first)->keypair;
+
+ if (likely(state == PACKET_STATE_CRYPTED))
+- wg_packet_create_data_done(first, peer);
++ wg_packet_create_data_done(peer, first);
+ else
+ kfree_skb_list(first);
+
+@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct work_struct *work)
+ break;
+ }
+ }
+- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
+- state);
++ wg_queue_enqueue_per_peer_tx(first, state);
+ if (need_resched())
+ cond_resched();
+ }
+ }
+
+-static void wg_packet_create_data(struct sk_buff *first)
++static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
+ {
+- struct wg_peer *peer = PACKET_PEER(first);
+ struct wg_device *wg = peer->device;
+ int ret = -EINVAL;
+
+@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct sk_buff *first)
+ if (unlikely(READ_ONCE(peer->is_dead)))
+ goto err;
+
+- ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
+- &peer->tx_queue, first,
+- wg->packet_crypt_wq,
+- &wg->encrypt_queue.last_cpu);
++ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
++ wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+ if (unlikely(ret == -EPIPE))
+- wg_queue_enqueue_per_peer(&peer->tx_queue, first,
+- PACKET_STATE_DEAD);
++ wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
+ err:
+ rcu_read_unlock_bh();
+ if (likely(!ret || ret == -EPIPE))
+@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
+ packets.prev->next = NULL;
+ wg_peer_get(keypair->entry.peer);
+ PACKET_CB(packets.next)->keypair = keypair;
+- wg_packet_create_data(packets.next);
++ wg_packet_create_data(peer, packets.next);
+ return;
+
+ out_invalid:
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 7d98250380ec5..e815aab412d7a 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -9117,7 +9117,9 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
++ mutex_lock(&ar->conf_mutex);
+ ath10k_debug_fw_stats_request(ar);
++ mutex_unlock(&ar->conf_mutex);
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index bf9a8cb713dc0..1c3307e3b1085 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -1045,12 +1045,13 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ ret = ath10k_snoc_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+- goto err_wlan_enable;
++ goto err_free_rri;
+ }
+
+ return 0;
+
+-err_wlan_enable:
++err_free_rri:
++ ath10k_ce_free_rri(ar);
+ ath10k_snoc_wlan_disable(ar);
+
+ return ret;
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 7b5834157fe51..e6135795719a1 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -240,8 +240,10 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
+ __le32_to_cpu(stat->last_tx_rate_code),
+ __le32_to_cpu(stat->last_tx_bitrate_kbps));
+
++ rcu_read_lock();
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
+ if (!sta) {
++ rcu_read_unlock();
+ ath10k_warn(ar, "not found station for peer stats\n");
+ return -EINVAL;
+ }
+@@ -251,6 +253,7 @@ static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16
+ arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
+ arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
+ arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
++ rcu_read_unlock();
+
+ return 0;
+ }
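
The `rcu_read_lock()`/`rcu_read_unlock()` pair added around `ieee80211_find_sta_by_ifaddr()` enforces the basic RCU contract: a pointer returned by an RCU-protected lookup may only be dereferenced inside the read-side critical section that obtained it. A userspace analogue, assuming liburcu's default (memb) flavor and linking with `-lurcu`; the station structure and values are invented:

```c
/* Userspace sketch of the rule the ath10k fix enforces: lookup and
 * every use of the looked-up pointer stay in one read-side section. */
#define _LGPL_SOURCE
#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct station { int rx_bitrate_kbps; };

static struct station *cur_sta;	/* written via rcu_assign_pointer() */

static int read_bitrate(void)
{
	struct station *sta;
	int val = -1;

	rcu_read_lock();			/* lookup ...                  */
	sta = rcu_dereference(cur_sta);
	if (sta)
		val = sta->rx_bitrate_kbps;	/* ... and use, together       */
	rcu_read_unlock();			/* sta must not be used below  */
	return val;
}

int main(void)
{
	struct station *sta = calloc(1, sizeof(*sta));

	sta->rx_bitrate_kbps = 866000;
	rcu_register_thread();			/* readers must register */
	rcu_assign_pointer(cur_sta, sta);
	printf("%d kbps\n", read_bitrate());
	rcu_assign_pointer(cur_sta, NULL);
	synchronize_rcu();			/* wait out readers before freeing */
	free(sta);
	rcu_unregister_thread();
	return 0;
}
```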
+diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
+index c1608f64ea95d..7d799fe6fbd89 100644
+--- a/drivers/net/wireless/ath/ath11k/mac.c
++++ b/drivers/net/wireless/ath/ath11k/mac.c
+@@ -4248,11 +4248,6 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+ /* Configure the hash seed for hash based reo dest ring selection */
+ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
+- mutex_unlock(&ar->conf_mutex);
+-
+- rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+- &ab->pdevs[ar->pdev_idx]);
+-
+ /* allow device to enter IMPS */
+ if (ab->hw_params.idle_ps) {
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+@@ -4262,6 +4257,12 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+ goto err;
+ }
+ }
++
++ mutex_unlock(&ar->conf_mutex);
++
++ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
++ &ab->pdevs[ar->pdev_idx]);
++
+ return 0;
+
+ err:
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index 017a43bc400ca..4c81b1d7f4171 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1223,8 +1223,11 @@ static ssize_t write_file_nf_override(struct file *file,
+
+ ah->nf_override = val;
+
+- if (ah->curchan)
++ if (ah->curchan) {
++ ath9k_ps_wakeup(sc);
+ ath9k_hw_loadnf(ah, ah->curchan);
++ ath9k_ps_restore(sc);
++ }
+
+ return count;
+ }
+diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
+index b669dff24b6e0..665b737fbb0d8 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_n.c
++++ b/drivers/net/wireless/broadcom/b43/phy_n.c
+@@ -5311,7 +5311,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
+
+ for (i = 0; i < 4; i++) {
+ if (dev->phy.rev >= 3)
+- table[i] = coef[i];
++ coef[i] = table[i];
+ else
+ coef[i] = 0;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+index 895a907acdf0f..37ce4fe136c5e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+@@ -198,14 +198,14 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
+ le32_to_cpu(sku_id->data[1]),
+ le32_to_cpu(sku_id->data[2]));
+
++ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
++ len -= ALIGN(tlv_len, 4);
++
+ if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ int ret;
+
+- data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+- len -= ALIGN(tlv_len, 4);
+-
+ ret = iwl_pnvm_handle_section(trans, data, len);
+ if (!ret)
+ return 0;
+@@ -227,6 +227,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notification_wait pnvm_wait;
+ static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ PNVM_INIT_COMPLETE_NTFY) };
++ int ret;
+
+ /* if the SKU_ID is empty, there's nothing to do */
+ if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+@@ -236,7 +237,6 @@ int iwl_pnvm_load(struct iwl_trans *trans,
+ if (!trans->pnvm_loaded) {
+ const struct firmware *pnvm;
+ char pnvm_name[64];
+- int ret;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+@@ -264,6 +264,11 @@ int iwl_pnvm_load(struct iwl_trans *trans,
+
+ release_firmware(pnvm);
+ }
++ } else {
++ /* if we already loaded, we need to set it again */
++ ret = iwl_trans_set_pnvm(trans, NULL, 0);
++ if (ret)
++ return ret;
+ }
+
+ iwl_init_notification_wait(notif_wait, &pnvm_wait,
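
The `iwl_pnvm_parse()` change moves the `data`/`len` cursor advance in front of the SKU comparison, so a section whose SKU does not match is skipped cleanly instead of leaving the cursor stale for the next iteration. The generic discipline, with a TLV layout invented for the demo (little-endian buffer):

```c
/* Advance past each TLV (header + 4-byte-aligned payload) before
 * deciding whether it is interesting, mirroring the PNVM fix. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

struct tlv {
	uint32_t type;
	uint32_t len;			/* payload bytes, unaligned */
};

static void walk(const uint8_t *data, size_t len, uint32_t wanted)
{
	while (len >= sizeof(struct tlv)) {
		struct tlv tlv;

		memcpy(&tlv, data, sizeof(tlv));
		if (len < sizeof(tlv) + ALIGN4(tlv.len))
			return;			/* truncated input */

		const uint8_t *payload = data + sizeof(tlv);

		/* Advance unconditionally, before the match check. */
		data += sizeof(tlv) + ALIGN4(tlv.len);
		len  -= sizeof(tlv) + ALIGN4(tlv.len);

		if (tlv.type == wanted)
			printf("TLV type %u: %.*s\n", (unsigned)tlv.type,
			       (int)tlv.len, (const char *)payload);
	}
}

int main(void)
{
	/* type=1 len=2 "no" (padded), type=2 len=3 "yes" (padded) */
	uint8_t buf[] = { 1,0,0,0, 2,0,0,0, 'n','o',0,0,
			  2,0,0,0, 3,0,0,0, 'y','e','s',0 };

	walk(buf, sizeof(buf), 2);	/* skips the non-matching TLV */
	return 0;
}
```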
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 313e9f106f465..4c5609cdcbdee 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -859,12 +859,10 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ if (cmd_ver == 3) {
+ len = sizeof(cmd.v3);
+ n_bands = ARRAY_SIZE(cmd.v3.table[0]);
+- cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(cmd.v2);
+ n_bands = ARRAY_SIZE(cmd.v2.table[0]);
+- cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ } else {
+ len = sizeof(cmd.v1);
+ n_bands = ARRAY_SIZE(cmd.v1.table[0]);
+@@ -884,6 +882,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ if (ret)
+ return 0;
+
++ /*
++ * Set the revision on versions that contain it.
++ * This must be done after calling iwl_sar_geo_init().
++ */
++ if (cmd_ver == 3)
++ cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
++ else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
++ IWL_UCODE_TLV_API_SAR_TABLE_VER))
++ cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
++
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ 0, len, &cmd);
+@@ -892,7 +900,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
+ {
+ union acpi_object *wifi_pkg, *data, *enabled;
+- union iwl_ppag_table_cmd ppag_table;
+ int i, j, ret, tbl_rev, num_sub_bands;
+ int idx = 2;
+ s8 *gain;
+@@ -946,8 +953,8 @@ read_table:
+ goto out_free;
+ }
+
+- ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
+- if (!ppag_table.v1.enabled) {
++ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
++ if (!mvm->fwrt.ppag_table.v1.enabled) {
+ ret = 0;
+ goto out_free;
+ }
+@@ -962,16 +969,23 @@ read_table:
+ union acpi_object *ent;
+
+ ent = &wifi_pkg->package.elements[idx++];
+- if (ent->type != ACPI_TYPE_INTEGER ||
+- (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) ||
+- (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
+- (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
+- (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
+- ppag_table.v1.enabled = cpu_to_le32(0);
++ if (ent->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
++
+ gain[i * num_sub_bands + j] = ent->integer.value;
++
++ if ((j == 0 &&
++ (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB ||
++ gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) ||
++ (j != 0 &&
++ (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB ||
++ gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) {
++ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
++ ret = -EINVAL;
++ goto out_free;
++ }
+ }
+ }
+ ret = 0;
+@@ -984,7 +998,6 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ {
+ u8 cmd_ver;
+ int i, j, ret, num_sub_bands, cmd_size;
+- union iwl_ppag_table_cmd ppag_table;
+ s8 *gain;
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+@@ -1003,7 +1016,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS;
+ gain = mvm->fwrt.ppag_table.v1.gain[0];
+- cmd_size = sizeof(ppag_table.v1);
++ cmd_size = sizeof(mvm->fwrt.ppag_table.v1);
+ if (mvm->fwrt.ppag_ver == 2) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v2 but FW supports v1, sending truncated table\n");
+@@ -1011,7 +1024,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ } else if (cmd_ver == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = mvm->fwrt.ppag_table.v2.gain[0];
+- cmd_size = sizeof(ppag_table.v2);
++ cmd_size = sizeof(mvm->fwrt.ppag_table.v2);
+ if (mvm->fwrt.ppag_ver == 1) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+@@ -1031,7 +1044,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
+ IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD),
+- 0, cmd_size, &ppag_table);
++ 0, cmd_size, &mvm->fwrt.ppag_table);
+ if (ret < 0)
+ IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
+ ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+index 4e1bdf13e5e71..0b012f8c9eb22 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+@@ -999,9 +999,6 @@ void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+
+ lockdep_assert_held(&mvm->mutex);
+
+- if (!te_data->running)
+- return;
+-
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index 5b5134dd49af8..8fba190e84cf3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -298,17 +298,20 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+- ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+- &trans_pcie->pnvm_dram);
+- if (ret < 0) {
+- IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
+- ret);
+- return ret;
++ /* only allocate the DRAM if not allocated yet */
++ if (!trans->pnvm_loaded) {
++ if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
++ return -EBUSY;
++
++ ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
++ &trans_pcie->pnvm_dram);
++ if (ret < 0) {
++ IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
++ ret);
++ return ret;
++ }
+ }
+
+- if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+- return -EBUSY;
+-
+ prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+ cpu_to_le64(trans_pcie->pnvm_dram.physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index acb786d8b1d8f..e02a4fbb74de5 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -162,13 +162,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+ {
+ struct xenvif_queue *queue = dev_id;
+ int old;
++ bool has_rx, has_tx;
+
+ old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+ WARN(old, "Interrupt while EOI pending\n");
+
+- /* Use bitwise or as we need to call both functions. */
+- if ((!xenvif_handle_tx_interrupt(queue) |
+- !xenvif_handle_rx_interrupt(queue))) {
++ has_tx = xenvif_handle_tx_interrupt(queue);
++ has_rx = xenvif_handle_rx_interrupt(queue);
++
++ if (!has_rx && !has_tx) {
+ atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
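
The old xen-netback code used bitwise `|` precisely so that neither handler call could be short-circuited away; the fix evaluates both handlers into named booleans and also tightens the condition, reporting a spurious event only when both found no work. The short-circuit trap in isolation, compile-and-run, with invented names:

```c
/* Why `|` was used and why named locals are clearer: `||` skips its
 * right-hand side, so a handler with side effects can silently not run. */
#include <stdbool.h>
#include <stdio.h>

static bool handle_tx(void) { puts("tx handler ran"); return false; }
static bool handle_rx(void) { puts("rx handler ran"); return false; }

int main(void)
{
	/* Bitwise form: both handlers always run. */
	if (!handle_tx() | !handle_rx())
		puts("no work (bitwise)");

	/* Short-circuit form: handle_rx() is skipped as soon as
	 * handle_tx() reports no work; its side effects are lost. */
	if (!handle_tx() || !handle_rx())
		puts("no work (short-circuit)");

	/* The fix's shape: evaluate both, then test explicitly. */
	bool has_tx = handle_tx();
	bool has_rx = handle_rx();
	if (!has_tx && !has_rx)
		puts("no work (explicit)");
	return 0;
}
```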
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 282b7a4ea9a9a..fdfc18a222cc3 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+ if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
+ ns->head->disk->queue);
++#ifdef CONFIG_BLK_DEV_ZONED
++ if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
++ ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
++#endif
+ }
+
+ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index dc1ea468b182b..1827d8d8f3b00 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -469,7 +469,6 @@ out:
+ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ {
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+- struct nvmet_ns *ns;
+ struct nvme_id_ns *id;
+ u16 status = 0;
+
+@@ -486,20 +485,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ }
+
+ /* return an all zeroed buffer if we can't find an active namespace */
+- ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
+- if (!ns) {
+- status = NVME_SC_INVALID_NS;
++ req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
++ if (!req->ns) {
++ status = 0;
+ goto done;
+ }
+
+- nvmet_ns_revalidate(ns);
++ nvmet_ns_revalidate(req->ns);
+
+ /*
+ * nuse = ncap = nsze isn't always true, but we have no way to find
+ * that out from the underlying device.
+ */
+- id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
+- switch (req->port->ana_state[ns->anagrpid]) {
++ id->ncap = id->nsze =
++ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
++ switch (req->port->ana_state[req->ns->anagrpid]) {
+ case NVME_ANA_INACCESSIBLE:
+ case NVME_ANA_PERSISTENT_LOSS:
+ break;
+@@ -508,8 +508,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ break;
+ }
+
+- if (ns->bdev)
+- nvmet_bdev_set_limits(ns->bdev, id);
++ if (req->ns->bdev)
++ nvmet_bdev_set_limits(req->ns->bdev, id);
+
+ /*
+ * We just provide a single LBA format that matches what the
+@@ -523,25 +523,24 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
+ * controllers, but also with any other user of the block device.
+ */
+ id->nmic = (1 << 0);
+- id->anagrpid = cpu_to_le32(ns->anagrpid);
++ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
+
+- memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
++ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
+
+- id->lbaf[0].ds = ns->blksize_shift;
++ id->lbaf[0].ds = req->ns->blksize_shift;
+
+- if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
++ if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+ id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+ NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+ NVME_NS_DPC_PI_TYPE3;
+ id->mc = NVME_MC_EXTENDED_LBA;
+- id->dps = ns->pi_type;
++ id->dps = req->ns->pi_type;
+ id->flbas = NVME_NS_FLBAS_META_EXT;
+- id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
++ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
+ }
+
+- if (ns->readonly)
++ if (req->ns->readonly)
+ id->nsattr |= (1 << 0);
+- nvmet_put_namespace(ns);
+ done:
+ if (!status)
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index aacf06f0b4312..8b0485ada315b 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -379,7 +379,7 @@ err:
+ return NVME_SC_INTERNAL;
+ }
+
+-static void nvmet_tcp_ddgst(struct ahash_request *hash,
++static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+ {
+ ahash_request_set_crypt(hash, cmd->req.sg,
+@@ -387,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
+ crypto_ahash_digest(hash);
+ }
+
++static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
++ struct nvmet_tcp_cmd *cmd)
++{
++ struct scatterlist sg;
++ struct kvec *iov;
++ int i;
++
++ crypto_ahash_init(hash);
++ for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
++ sg_init_one(&sg, iov->iov_base, iov->iov_len);
++ ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
++ crypto_ahash_update(hash);
++ }
++ ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
++ crypto_ahash_final(hash);
++}
++
+ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+ {
+ struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
+@@ -411,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+
+ if (queue->data_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_DDGST;
+- nvmet_tcp_ddgst(queue->snd_hash, cmd);
++ nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
+ }
+
+ if (cmd->queue->hdr_digest) {
+@@ -1060,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
+ {
+ struct nvmet_tcp_queue *queue = cmd->queue;
+
+- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
++ nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
+ queue->offset = 0;
+ queue->left = NVME_TCP_DIGEST_LENGTH;
+ queue->rcv_state = NVMET_TCP_RECV_DDGST;
+@@ -1081,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+ cmd->rbytes_done += ret;
+ }
+
++ if (queue->data_digest) {
++ nvmet_tcp_prep_recv_ddgst(cmd);
++ return 0;
++ }
+ nvmet_tcp_unmap_pdu_iovec(cmd);
+
+ if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ cmd->rbytes_done == cmd->req.transfer_len) {
+- if (queue->data_digest) {
+- nvmet_tcp_prep_recv_ddgst(cmd);
+- return 0;
+- }
+ cmd->req.execute(&cmd->req);
+ }
+
+@@ -1468,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
+ if (inet->rcv_tos > 0)
+ ip_sock_set_tos(sock->sk, inet->rcv_tos);
+
++ ret = 0;
+ write_lock_bh(&sock->sk->sk_callback_lock);
+- sock->sk->sk_user_data = queue;
+- queue->data_ready = sock->sk->sk_data_ready;
+- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+- queue->state_change = sock->sk->sk_state_change;
+- sock->sk->sk_state_change = nvmet_tcp_state_change;
+- queue->write_space = sock->sk->sk_write_space;
+- sock->sk->sk_write_space = nvmet_tcp_write_space;
++ if (sock->sk->sk_state != TCP_ESTABLISHED) {
++ /*
++ * If the socket is already closing, don't even start
++ * consuming it
++ */
++ ret = -ENOTCONN;
++ } else {
++ sock->sk->sk_user_data = queue;
++ queue->data_ready = sock->sk->sk_data_ready;
++ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
++ queue->state_change = sock->sk->sk_state_change;
++ sock->sk->sk_state_change = nvmet_tcp_state_change;
++ queue->write_space = sock->sk->sk_write_space;
++ sock->sk->sk_write_space = nvmet_tcp_write_space;
++ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
++ }
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+- return 0;
++ return ret;
+ }
+
+ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+@@ -1526,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+ if (ret)
+ goto out_destroy_sq;
+
+- queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+-
+ return 0;
+ out_destroy_sq:
+ mutex_lock(&nvmet_tcp_queue_mutex);
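
The split into `nvmet_tcp_send_ddgst()`/`nvmet_tcp_recv_ddgst()` exists because received payload is scattered across per-command iovecs, so the receive-side digest must be accumulated buffer by buffer (init, update per iovec, final) rather than hashed in one pass like the contiguous send-side scatterlist. The accumulate-equals-one-shot property it relies on, shown with zlib's incremental `crc32()` as a stand-in digest (link with `-lz`; this is plain CRC32, not the NVMe/TCP CRC32C):

```c
/* Incremental digest over iovecs equals the one-shot digest over the
 * contiguous payload; mirrors the shape of nvmet_tcp_recv_ddgst(). */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <zlib.h>

static unsigned long digest_iov(const struct iovec *iov, int nr)
{
	unsigned long crc = crc32(0L, Z_NULL, 0);	/* "init" */
	int i;

	for (i = 0; i < nr; i++)			/* "update" per buffer */
		crc = crc32(crc, iov[i].iov_base, iov[i].iov_len);
	return crc;					/* "final" */
}

int main(void)
{
	char a[] = "nvme", b[] = "-tcp", whole[] = "nvme-tcp";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};

	printf("iov: %08lx  contiguous: %08lx\n",
	       digest_iov(iov, 2),
	       crc32(crc32(0L, Z_NULL, 0),
		     (const unsigned char *)whole, strlen(whole)));
	return 0;
}
```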
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 177f5bf27c6d5..a5ab1e0c74cf6 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -682,7 +682,9 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+
+ for_each_child_of_node(parent, child) {
+ addr = of_get_property(child, "reg", &len);
+- if (!addr || (len < 2 * sizeof(u32))) {
++ if (!addr)
++ continue;
++ if (len < 2 * sizeof(u32)) {
+ dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ return -EINVAL;
+ }
+@@ -713,6 +715,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+ cell->name, nvmem->stride);
+ /* Cells already added will be freed later. */
+ kfree_const(cell->name);
++ of_node_put(cell->np);
+ kfree(cell);
+ return -EINVAL;
+ }
+diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
+index a72704cd04681..f6e9f96933ca2 100644
+--- a/drivers/nvmem/qcom-spmi-sdam.c
++++ b/drivers/nvmem/qcom-spmi-sdam.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2017, 2020 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+ #include <linux/device.h>
+@@ -18,7 +18,6 @@
+ #define SDAM_PBS_TRIG_CLR 0xE6
+
+ struct sdam_chip {
+- struct platform_device *pdev;
+ struct regmap *regmap;
+ struct nvmem_config sdam_config;
+ unsigned int base;
+@@ -65,7 +64,7 @@ static int sdam_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+ {
+ struct sdam_chip *sdam = priv;
+- struct device *dev = &sdam->pdev->dev;
++ struct device *dev = sdam->sdam_config.dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+@@ -86,7 +85,7 @@ static int sdam_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+ {
+ struct sdam_chip *sdam = priv;
+- struct device *dev = &sdam->pdev->dev;
++ struct device *dev = sdam->sdam_config.dev;
+ int rc;
+
+ if (!sdam_is_valid(sdam, offset, bytes)) {
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index feb0f2d67fc5f..dcc1dd96911a9 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1146,8 +1146,16 @@ int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+ {
+- if (nomap)
+- return memblock_remove(base, size);
++ if (nomap) {
++ /*
++ * If the memory is already reserved (by another region), we
++ * should not allow it to be marked nomap.
++ */
++ if (memblock_is_region_reserved(base, size))
++ return -EBUSY;
++
++ return memblock_mark_nomap(base, size);
++ }
+ return memblock_reserve(base, size);
+ }
+
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index 03cb387236c4c..d0c0336be39b4 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -755,7 +755,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+ struct device *dev, struct device_node *np)
+ {
+ struct dev_pm_opp *new_opp;
+- u64 rate = 0;
+ u32 val;
+ int ret;
+ bool rate_not_available = false;
+@@ -772,7 +771,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+
+ /* Check if the OPP supports hardware's hierarchy of versions or not */
+ if (!_opp_is_supported(dev, opp_table, np)) {
+- dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
++ dev_dbg(dev, "OPP not supported by hardware: %lu\n",
++ new_opp->rate);
+ goto free_opp;
+ }
+
+diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
+index 811c1cb2e8deb..1cb7cfc75d6e4 100644
+--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
++++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
+@@ -321,9 +321,10 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = cdns_pcie_host_bar_config(rc, entry);
+- if (err)
++ if (err) {
+ dev_err(dev, "Fail to configure IB using dma-ranges\n");
+- return err;
++ return err;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index affa2713bf80e..0d605a0d69e30 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -398,7 +398,9 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+- val &= ~PHY_REFCLK_USE_PAD;
++ /* USE_PAD is required only for ipq806x */
++ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
++ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
+index 4d1c4b24e5370..a728e8f9ad3c8 100644
+--- a/drivers/pci/controller/pcie-rcar-host.c
++++ b/drivers/pci/controller/pcie-rcar-host.c
+@@ -735,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+ }
+
+ /* setup MSI data target */
+- msi->pages = __get_free_pages(GFP_KERNEL, 0);
++ msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA32, 0);
+ rcar_pcie_hw_enable_msi(host);
+
+ return 0;
+diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
+index 904dec0d3a88f..990a00e08bc5b 100644
+--- a/drivers/pci/controller/pcie-rockchip.c
++++ b/drivers/pci/controller/pcie-rockchip.c
+@@ -82,7 +82,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ }
+
+ rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
+- "mgmt-sticky");
++ "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+@@ -118,11 +118,11 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+ }
+
+ if (rockchip->is_rc) {
+- rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+- if (IS_ERR(rockchip->ep_gpio)) {
+- dev_err(dev, "missing ep-gpios property in node\n");
+- return PTR_ERR(rockchip->ep_gpio);
+- }
++ rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
++ GPIOD_OUT_HIGH);
++ if (IS_ERR(rockchip->ep_gpio))
++ return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
++ "failed to get ep GPIO\n");
+ }
+
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
+index f92e0152e65e3..67937facd90cd 100644
+--- a/drivers/pci/controller/pcie-xilinx-cpm.c
++++ b/drivers/pci/controller/pcie-xilinx-cpm.c
+@@ -404,6 +404,7 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
+ return 0;
+ out:
+ xilinx_cpm_free_irq_domains(port);
++ of_node_put(pcie_intc_node);
+ dev_err(dev, "Failed to allocate IRQ domains\n");
+
+ return -ENOMEM;
+diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
+index 139869d50eb26..fdaf86a888b73 100644
+--- a/drivers/pci/pci-bridge-emul.c
++++ b/drivers/pci/pci-bridge-emul.c
+@@ -21,8 +21,9 @@
+ #include "pci-bridge-emul.h"
+
+ #define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
++#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
+ #define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
+-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
++#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
+
+ /**
+ * struct pci_bridge_reg_behavior - register bits behaviors
+@@ -46,7 +47,8 @@ struct pci_bridge_reg_behavior {
+ u32 w1c;
+ };
+
+-static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
++static const
++struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
+ [PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ [PCI_COMMAND / 4] = {
+ .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+@@ -164,7 +166,8 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ },
+ };
+
+-static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
++static const
++struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = {
+ [PCI_CAP_LIST_ID / 4] = {
+ /*
+ * Capability ID, Next Capability Pointer and
+@@ -260,6 +263,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
+ unsigned int flags)
+ {
++ BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
++
+ bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
+ bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->conf.cache_line_size = 0x10;
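
`pci_bridge_emul_init()` now asserts at build time that the emulated config structure matches `PCI_BRIDGE_CONF_END`, and both behavior tables are declared with explicit lengths so the table and the register space it describes cannot silently drift apart. In plain C11 the same guard is a `_Static_assert`; the register layout below is invented for the demo:

```c
/* Compile-time sizing guard in the style of the pci-bridge-emul fix. */
#include <stdint.h>

#define CONF_END 0x40			/* bytes of emulated config space */

struct conf_space {
	uint8_t bytes[CONF_END];
};

struct reg_behavior {
	uint32_t ro, rw, w1c;
};

/* One behavior entry per 32-bit register, length stated explicitly: */
static const struct reg_behavior regs_behavior[CONF_END / 4] = {
	[0x00 / 4] = { .ro = ~0u },	/* vendor/device ID: read-only */
	[0x04 / 4] = { .rw = 0x0147 },	/* command register: writable bits */
};

/* Build fails if the structure and the table ever disagree: */
_Static_assert(sizeof(struct conf_space) == CONF_END,
	       "config space size mismatch");
_Static_assert(sizeof(regs_behavior) / sizeof(regs_behavior[0]) ==
	       CONF_END / 4, "behavior table size mismatch");
```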
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 43eda101fcf40..7f1acb3918d0c 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -410,10 +410,16 @@ EXPORT_SYMBOL(pci_release_resource);
+ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
+ {
+ struct resource *res = dev->resource + resno;
++ struct pci_host_bridge *host;
+ int old, ret;
+ u32 sizes;
+ u16 cmd;
+
++ /* Check if we must preserve the firmware's resource assignment */
++ host = pci_find_host_bridge(dev->bus);
++ if (host->preserve_config)
++ return -ENOTSUPP;
++
+ /* Make sure the resource isn't assigned before resizing it. */
+ if (!(res->flags & IORESOURCE_UNSET))
+ return -EBUSY;
+diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
+index 31e39558d49d8..8b003c890b87b 100644
+--- a/drivers/pci/syscall.c
++++ b/drivers/pci/syscall.c
+@@ -20,7 +20,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ u16 word;
+ u32 dword;
+ long err;
+- long cfg_ret;
++ int cfg_ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -46,7 +46,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ }
+
+ err = -EIO;
+- if (cfg_ret != PCIBIOS_SUCCESSFUL)
++ if (cfg_ret)
+ goto error;
+
+ switch (len) {
+@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ if (err)
+ break;
+ err = pci_user_write_config_byte(dev, off, byte);
+- if (err != PCIBIOS_SUCCESSFUL)
++ if (err)
+ err = -EIO;
+ break;
+
+@@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ if (err)
+ break;
+ err = pci_user_write_config_word(dev, off, word);
+- if (err != PCIBIOS_SUCCESSFUL)
++ if (err)
+ err = -EIO;
+ break;
+
+@@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ if (err)
+ break;
+ err = pci_user_write_config_dword(dev, off, dword);
+- if (err != PCIBIOS_SUCCESSFUL)
++ if (err)
+ err = -EIO;
+ break;
+
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index a76ff594f3ca4..46defb1dcf867 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -1150,7 +1150,7 @@ static int arm_cmn_commit_txn(struct pmu *pmu)
+ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+ {
+ struct arm_cmn *cmn;
+- unsigned int target;
++ unsigned int i, target;
+
+ cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node);
+ if (cpu != cmn->cpu)
+@@ -1161,6 +1161,8 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+ return 0;
+
+ perf_pmu_migrate_context(&cmn->pmu, cpu, target);
++ for (i = 0; i < cmn->num_dtcs; i++)
++ irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target));
+ cmn->cpu = target;
+ return 0;
+ }
+@@ -1502,7 +1504,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ struct arm_cmn *cmn;
+ const char *name;
+ static atomic_t id;
+- int err, rootnode, this_id;
++ int err, rootnode;
+
+ cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
+ if (!cmn)
+@@ -1549,14 +1551,9 @@ static int arm_cmn_probe(struct platform_device *pdev)
+ .cancel_txn = arm_cmn_end_txn,
+ };
+
+- this_id = atomic_fetch_inc(&id);
+- if (this_id == 0) {
+- name = "arm_cmn";
+- } else {
+- name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
+- if (!name)
+- return -ENOMEM;
+- }
++ name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", atomic_fetch_inc(&id));
++ if (!name)
++ return -ENOMEM;
+
+ err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
+ if (err)
+diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
+index 00dabe5fab8a0..68d9c2f6a5caf 100644
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -52,6 +52,7 @@ config PHY_XGENE
+ config USB_LGM_PHY
+ tristate "INTEL Lightning Mountain USB PHY Driver"
+ depends on USB_SUPPORT
++ depends on X86 || COMPILE_TEST
+ select USB_PHY
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
+diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
+index f310e15d94cbc..591a15834b48f 100644
+--- a/drivers/phy/cadence/phy-cadence-torrent.c
++++ b/drivers/phy/cadence/phy-cadence-torrent.c
+@@ -2298,6 +2298,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
+
+ if (total_num_lanes > MAX_NUM_LANES) {
+ dev_err(dev, "Invalid lane configuration\n");
++ ret = -EINVAL;
+ goto put_lnk_rst;
+ }
+
+diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+index a7d126192cf12..29d246ea24b47 100644
+--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
++++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+@@ -124,8 +124,16 @@ static int ltq_rcu_usb2_phy_power_on(struct phy *phy)
+ reset_control_deassert(priv->phy_reset);
+
+ ret = clk_prepare_enable(priv->phy_gate_clk);
+- if (ret)
++ if (ret) {
+ dev_err(dev, "failed to enable PHY gate\n");
++ return ret;
++ }
++
++ /*
++ * at least the xrx200 usb2 phy requires some extra time to be
++ * operational after enabling the clock
++ */
++ usleep_range(100, 200);
+
+ return ret;
+ }
+diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
+index 1e424f263e7ab..496d199852aff 100644
+--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
+@@ -248,15 +248,17 @@ static int rockchip_emmc_phy_init(struct phy *phy)
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+- * The clock is optional, so upon any error we just set to NULL.
++ * The clock is optional, using clk_get_optional() to get the clock
++ * and do error processing if the return value != NULL
+ *
+ * NOTE: we don't do anything special for EPROBE_DEFER here. Given the
+ * above expected use case, EPROBE_DEFER isn't sensible to expect, so
+ * it's just like any other error.
+ */
+- rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
++ rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ if (IS_ERR(rk_phy->emmcclk)) {
+- dev_dbg(&phy->dev, "Error getting emmcclk: %d\n", ret);
++ ret = PTR_ERR(rk_phy->emmcclk);
++ dev_err(&phy->dev, "Error getting emmcclk: %d\n", ret);
+ rk_phy->emmcclk = NULL;
+ }
+
+diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
+index 7c92a6e22d75d..aa7f7aa772971 100644
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -526,11 +526,13 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
+ * power), not wake up.
+ */
+ ec_dev->host_event_wake_mask = U32_MAX &
+- ~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) |
+- BIT(EC_HOST_EVENT_BATTERY_LOW) |
+- BIT(EC_HOST_EVENT_BATTERY_CRITICAL) |
+- BIT(EC_HOST_EVENT_PD_MCU) |
+- BIT(EC_HOST_EVENT_BATTERY_STATUS));
++ ~(EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED) |
++ EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED) |
++ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW) |
++ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL) |
++ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY) |
++ EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
++ EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS));
+ /*
+ * Old ECs may not support this command. Complain about all
+ * other errors.
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 91e6176cdfbdf..ac4125ec06603 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -1369,7 +1369,7 @@ config INTEL_PMC_CORE
+ - MPHY/PLL gating status (Sunrisepoint PCH only)
+
+ config INTEL_PMT_CLASS
+- tristate "Intel Platform Monitoring Technology (PMT) Class driver"
++ tristate
+ help
+ The Intel Platform Monitoring Technology (PMT) class driver provides
+ the basic sysfs interface and file hierarchy uses by PMT devices.
+@@ -1382,6 +1382,7 @@ config INTEL_PMT_CLASS
+
+ config INTEL_PMT_TELEMETRY
+ tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
++ depends on MFD_INTEL_PMT
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitory Technology (PMT) Telemetry driver provides
+@@ -1393,6 +1394,7 @@ config INTEL_PMT_TELEMETRY
+
+ config INTEL_PMT_CRASHLOG
+ tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
++ depends on MFD_INTEL_PMT
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) crashlog driver provides
+diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
+index 2fe3a627cb535..d9cf91e5b06d0 100644
+--- a/drivers/power/reset/at91-sama5d2_shdwc.c
++++ b/drivers/power/reset/at91-sama5d2_shdwc.c
+@@ -37,7 +37,7 @@
+
+ #define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
+ #define AT91_SHDW_WKUPDBC_SHIFT 24
+-#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16)
++#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24)
+ #define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \
+ & AT91_SHDW_WKUPDBC_MASK)
+
+diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
+index eec646c568b7b..1699b9269a78e 100644
+--- a/drivers/power/supply/Kconfig
++++ b/drivers/power/supply/Kconfig
+@@ -229,6 +229,7 @@ config BATTERY_SBS
+ config CHARGER_SBS
+ tristate "SBS Compliant charger"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ Say Y to include support for SBS compliant battery chargers.
+
+diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
+index 70b28b699a80c..8933ae26c3d69 100644
+--- a/drivers/power/supply/axp20x_usb_power.c
++++ b/drivers/power/supply/axp20x_usb_power.c
+@@ -593,6 +593,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
+ power->axp20x_id = axp_data->axp20x_id;
+ power->regmap = axp20x->regmap;
+ power->num_irqs = axp_data->num_irq_names;
++ INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+
+ if (power->axp20x_id == AXP202_ID) {
+ /* Enable vbus valid checking */
+@@ -645,7 +646,6 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
+ }
+ }
+
+- INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+ if (axp20x_usb_vbus_needs_polling(power))
+ queue_delayed_work(system_power_efficient_wq, &power->vbus_detect, 0);
+
+diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
+index 295611b3b15e9..cebc5c8fda1b5 100644
+--- a/drivers/power/supply/cpcap-battery.c
++++ b/drivers/power/supply/cpcap-battery.c
+@@ -561,17 +561,21 @@ static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (error)
+- return error;
++ goto out_put;
+
+ /* Allow charger const voltage lower than battery const voltage */
+ if (const_charge_voltage > prop.intval)
+- return 0;
++ goto out_put;
+
+ val.intval = const_charge_voltage;
+
+- return power_supply_set_property(charger,
++ error = power_supply_set_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &val);
++out_put:
++ power_supply_put(charger);
++
++ return error;
+ }
+
+ static int cpcap_battery_set_property(struct power_supply *psy,
+@@ -666,7 +670,7 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
+
+ error = devm_request_threaded_irq(ddata->dev, irq, NULL,
+ cpcap_battery_irq_thread,
+- IRQF_SHARED,
++ IRQF_SHARED | IRQF_ONESHOT,
+ name, ddata);
+ if (error) {
+ dev_err(ddata->dev, "could not get irq %s: %i\n",
+diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
+index c0d452e3dc8b0..22fff01425d63 100644
+--- a/drivers/power/supply/cpcap-charger.c
++++ b/drivers/power/supply/cpcap-charger.c
+@@ -301,6 +301,8 @@ cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
+ &prop);
+ if (!error)
+ voltage = prop.intval;
++
++ power_supply_put(battery);
+ }
+
+ return voltage;
+@@ -708,7 +710,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
+
+ error = devm_request_threaded_irq(ddata->dev, irq, NULL,
+ cpcap_charger_irq_thread,
+- IRQF_SHARED,
++ IRQF_SHARED | IRQF_ONESHOT,
+ name, ddata);
+ if (error) {
+ dev_err(ddata->dev, "could not get irq %s: %i\n",
+diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
+index d3bf35ed12cee..8cfbd8d6b4786 100644
+--- a/drivers/power/supply/smb347-charger.c
++++ b/drivers/power/supply/smb347-charger.c
+@@ -137,6 +137,7 @@
+ * @mains_online: is AC/DC input connected
+ * @usb_online: is USB input connected
+ * @charging_enabled: is charging enabled
++ * @irq_unsupported: is interrupt unsupported by SMB hardware
+ * @max_charge_current: maximum current (in uA) the battery can be charged
+ * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
+ * @pre_charge_current: current (in uA) to use in pre-charging phase
+@@ -193,6 +194,7 @@ struct smb347_charger {
+ bool mains_online;
+ bool usb_online;
+ bool charging_enabled;
++ bool irq_unsupported;
+
+ unsigned int max_charge_current;
+ unsigned int max_charge_voltage;
+@@ -862,6 +864,9 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
+ {
+ int ret;
+
++ if (smb->irq_unsupported)
++ return 0;
++
+ ret = smb347_set_writable(smb, true);
+ if (ret < 0)
+ return ret;
+@@ -923,8 +928,6 @@ static int smb347_irq_init(struct smb347_charger *smb,
+ ret = regmap_update_bits(smb->regmap, CFG_STAT,
+ CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+ CFG_STAT_DISABLED);
+- if (ret < 0)
+- client->irq = 0;
+
+ smb347_set_writable(smb, false);
+
+@@ -1345,6 +1348,7 @@ static int smb347_probe(struct i2c_client *client,
+ if (ret < 0) {
+ dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
+ dev_warn(dev, "disabling IRQ support\n");
++ smb->irq_unsupported = true;
+ } else {
+ smb347_irq_enable(smb);
+ }
+@@ -1357,8 +1361,8 @@ static int smb347_remove(struct i2c_client *client)
+ {
+ struct smb347_charger *smb = i2c_get_clientdata(client);
+
+- if (client->irq)
+- smb347_irq_disable(smb);
++ smb347_irq_disable(smb);
++
+ return 0;
+ }
+
+diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
+index 5ede8255926ef..14b18fb4f5274 100644
+--- a/drivers/pwm/pwm-iqs620a.c
++++ b/drivers/pwm/pwm-iqs620a.c
+@@ -46,7 +46,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ struct iqs620_pwm_private *iqs620_pwm;
+ struct iqs62x_core *iqs62x;
+- u64 duty_scale;
++ unsigned int duty_cycle;
++ unsigned int duty_scale;
+ int ret;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+@@ -70,7 +71,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ * For lower duty cycles (e.g. 0), the PWM output is simply disabled to
+ * allow an external pull-down resistor to hold the GPIO3/LTX pin low.
+ */
+- duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS);
++ duty_cycle = min_t(u64, state->duty_cycle, IQS620_PWM_PERIOD_NS);
++ duty_scale = duty_cycle * 256 / IQS620_PWM_PERIOD_NS;
+
+ mutex_lock(&iqs620_pwm->lock);
+
+@@ -82,7 +84,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ }
+
+ if (duty_scale) {
+- u8 duty_val = min_t(u64, duty_scale - 1, 0xff);
++ u8 duty_val = duty_scale - 1;
+
+ ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE,
+ duty_val);
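
The iqs620a change clamps the requested duty cycle to the fixed hardware period before scaling, which removes the 64-bit division and guarantees that `duty_scale - 1` always fits the 8-bit duty register, making the old `min_t(u64, ...)` on the register value unnecessary. Worked arithmetic, assuming the driver's 1 ms period constant:

```c
/* Clamp-then-scale duty computation in the shape of the iqs620a fix. */
#include <stdint.h>
#include <stdio.h>

#define PWM_PERIOD_NS 1000000u	/* fixed ~1 ms period, per the driver */

/* Returns the register value; a scale of 0 means "output disabled". */
static uint8_t duty_reg(uint64_t requested_ns)
{
	unsigned int duty_ns = requested_ns > PWM_PERIOD_NS
				   ? PWM_PERIOD_NS : (unsigned int)requested_ns;
	unsigned int duty_scale = duty_ns * 256u / PWM_PERIOD_NS;  /* 0..256 */

	return duty_scale ? (uint8_t)(duty_scale - 1) : 0;
}

int main(void)
{
	printf("half period -> 0x%02x\n", duty_reg(PWM_PERIOD_NS / 2)); /* 0x7f */
	printf("full period -> 0x%02x\n", duty_reg(PWM_PERIOD_NS));     /* 0xff */
	printf("over-range  -> 0x%02x\n", duty_reg(10ull * PWM_PERIOD_NS));
	return 0;
}
```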
+diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
+index 389a5e1404128..f3a5641f6bca5 100644
+--- a/drivers/pwm/pwm-rockchip.c
++++ b/drivers/pwm/pwm-rockchip.c
+@@ -288,6 +288,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ const struct of_device_id *id;
+ struct rockchip_pwm_chip *pc;
+ u32 enable_conf, ctrl;
++ bool enabled;
+ int ret, count;
+
+ id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
+@@ -330,9 +331,9 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- ret = clk_prepare(pc->pclk);
++ ret = clk_prepare_enable(pc->pclk);
+ if (ret) {
+- dev_err(&pdev->dev, "Can't prepare APB clk: %d\n", ret);
++ dev_err(&pdev->dev, "Can't prepare enable APB clk: %d\n", ret);
+ goto err_clk;
+ }
+
+@@ -349,23 +350,26 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+ pc->chip.of_pwm_n_cells = 3;
+ }
+
++ enable_conf = pc->data->enable_conf;
++ ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
++ enabled = (ctrl & enable_conf) == enable_conf;
++
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+- clk_unprepare(pc->clk);
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ goto err_pclk;
+ }
+
+ /* Keep the PWM clk enabled if the PWM appears to be up and running. */
+- enable_conf = pc->data->enable_conf;
+- ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
+- if ((ctrl & enable_conf) != enable_conf)
++ if (!enabled)
+ clk_disable(pc->clk);
+
++ clk_disable(pc->pclk);
++
+ return 0;
+
+ err_pclk:
+- clk_unprepare(pc->pclk);
++ clk_disable_unprepare(pc->pclk);
+ err_clk:
+ clk_disable_unprepare(pc->clk);
+
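
The rockchip PWM probe rework keeps to the standard acquire-in-order, release-in-reverse goto ladder: the APB clock is now fully enabled around chip registration and dropped again afterwards, and each error label undoes exactly the steps taken so far. The bare idiom, with stand-in functions rather than kernel APIs:

```c
/* Goto-unwind probe idiom: one label per acquired resource. */
#include <stdio.h>

static int  clk_enable(const char *n)  { printf("enable %s\n", n); return 0; }
static void clk_disable(const char *n) { printf("disable %s\n", n); }
static int  chip_add(void)             { puts("register chip"); return -1; }

static int probe(void)
{
	int ret;

	ret = clk_enable("pwm");
	if (ret)
		return ret;

	ret = clk_enable("apb");
	if (ret)
		goto err_clk;

	ret = chip_add();	/* simulated failure exercises unwind */
	if (ret)
		goto err_pclk;

	clk_disable("apb");	/* only needed during registration */
	return 0;

err_pclk:
	clk_disable("apb");
err_clk:
	clk_disable("pwm");
	return ret;
}

int main(void) { return probe() ? 1 : 0; }
```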
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index 90cb8445f7216..d260c442b788d 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -1070,7 +1070,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
+ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
+ {
+ struct device_node *np, *regulators;
+- int ret;
++ int ret = 0;
+ u32 dcdcfreq = 0;
+
+ np = of_node_get(pdev->dev.parent->of_node);
+@@ -1085,13 +1085,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
+ ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
+- return ret;
+ }
+-
+ of_node_put(regulators);
+ }
+
+- return 0;
++ of_node_put(np);
++ return ret;
+ }
+
+ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 67a768fe5b2a3..2e6c6af9d1c3a 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1617,7 +1617,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ const char *supply_name)
+ {
+ struct regulator *regulator;
+- int err;
++ int err = 0;
+
+ if (dev) {
+ char buf[REG_STR_SIZE];
+@@ -1663,8 +1663,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ }
+ }
+
+- regulator->debugfs = debugfs_create_dir(supply_name,
+- rdev->debugfs);
++ if (err != -EEXIST)
++ regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+ if (!regulator->debugfs) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ } else {
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index c395a8dda6f7c..37a2abbe85c72 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -732,6 +732,15 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+ };
+
++static const struct rpmh_vreg_hw_data pmic5_hfsmps515_1 = {
++ .regulator_type = VRM,
++ .ops = &rpmh_regulator_vrm_ops,
++ .voltage_range = REGULATOR_LINEAR_RANGE(900000, 0, 4, 16000),
++ .n_voltages = 5,
++ .pmic_mode_map = pmic_mode_map_pmic5_smps,
++ .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
++};
++
+ static const struct rpmh_vreg_hw_data pmic5_bob = {
+ .regulator_type = VRM,
+ .ops = &rpmh_regulator_vrm_bypass_ops,
+@@ -928,6 +937,19 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
++ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
++ {},
++};
++
++static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
++ RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
++ RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps515_1, "vdd-s2"),
++ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, "vdd-l1"),
++ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo, "vdd-l2"),
++ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo, "vdd-l3"),
++ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
++ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
++ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
+ {},
+ };
+@@ -1057,6 +1079,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
+ .compatible = "qcom,pm8009-rpmh-regulators",
+ .data = pm8009_vreg_data,
+ },
++ {
++ .compatible = "qcom,pm8009-1-rpmh-regulators",
++ .data = pm8009_1_vreg_data,
++ },
+ {
+ .compatible = "qcom,pm8150-rpmh-regulators",
+ .data = pm8150_vreg_data,
+diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
+index 399002383b28b..5c558b153d55e 100644
+--- a/drivers/regulator/rohm-regulator.c
++++ b/drivers/regulator/rohm-regulator.c
+@@ -52,9 +52,12 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ char *prop;
+ unsigned int reg, mask, omask, oreg = desc->enable_reg;
+
+- for (i = 0; i < ROHM_DVS_LEVEL_MAX && !ret; i++) {
+- if (dvs->level_map & (1 << i)) {
+- switch (i + 1) {
++ for (i = 0; i < ROHM_DVS_LEVEL_VALID_AMOUNT && !ret; i++) {
++ int bit;
++
++ bit = BIT(i);
++ if (dvs->level_map & bit) {
++ switch (bit) {
+ case ROHM_DVS_LEVEL_RUN:
+ prop = "rohm,dvs-run-voltage";
+ reg = dvs->run_reg;
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 3fa472127e9a1..7c111bbdc2afa 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -544,14 +544,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ rdata = devm_kcalloc(&pdev->dev,
+ pdata->num_regulators, sizeof(*rdata),
+ GFP_KERNEL);
+- if (!rdata)
++ if (!rdata) {
++ of_node_put(regulators_np);
+ return -ENOMEM;
++ }
+
+ rmode = devm_kcalloc(&pdev->dev,
+ pdata->num_regulators, sizeof(*rmode),
+ GFP_KERNEL);
+- if (!rmode)
++ if (!rmode) {
++ of_node_put(regulators_np);
+ return -ENOMEM;
++ }
+
+ pdata->regulators = rdata;
+ pdata->opmode = rmode;
+@@ -573,10 +577,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ "s5m8767,pmic-ext-control",
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "s5m8767");
+- if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
++ if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) {
+ rdata->ext_control_gpiod = NULL;
+- else if (IS_ERR(rdata->ext_control_gpiod))
++ } else if (IS_ERR(rdata->ext_control_gpiod)) {
++ of_node_put(reg_np);
++ of_node_put(regulators_np);
+ return PTR_ERR(rdata->ext_control_gpiod);
++ }
+
+ rdata->id = i;
+ rdata->initdata = of_get_regulator_init_data(
+diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
+index 988edb4977c31..bcab38511bf31 100644
+--- a/drivers/remoteproc/mtk_common.h
++++ b/drivers/remoteproc/mtk_common.h
+@@ -47,6 +47,7 @@
+
+ #define MT8192_CORE0_SW_RSTN_CLR 0x10000
+ #define MT8192_CORE0_SW_RSTN_SET 0x10004
++#define MT8192_CORE0_WDT_IRQ 0x10030
+ #define MT8192_CORE0_WDT_CFG 0x10034
+
+ #define SCP_FW_VER_LEN 32
+diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
+index e0c2356903616..eba825b46696e 100644
+--- a/drivers/remoteproc/mtk_scp.c
++++ b/drivers/remoteproc/mtk_scp.c
+@@ -197,17 +197,19 @@ static void mt8192_scp_irq_handler(struct mtk_scp *scp)
+
+ scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+
+- if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
++ if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
+ scp_ipi_handler(scp);
+- else
+- scp_wdt_handler(scp, scp_to_host);
+
+- /*
+- * SCP won't send another interrupt until we clear
+- * MT8192_SCP2APMCU_IPC.
+- */
+- writel(MT8192_SCP_IPC_INT_BIT,
+- scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
++ /*
++ * SCP won't send another interrupt until we clear
++ * MT8192_SCP2APMCU_IPC.
++ */
++ writel(MT8192_SCP_IPC_INT_BIT,
++ scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
++ } else {
++ scp_wdt_handler(scp, scp_to_host);
++ writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
++ }
+ }
+
+ static irqreturn_t scp_irq_handler(int irq, void *priv)
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index 6123f9f4fbc90..4e2b3a175607b 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -692,6 +692,7 @@ config RTC_DRV_S5M
+ tristate "Samsung S2M/S5M series"
+ depends on MFD_SEC_CORE || COMPILE_TEST
+ select REGMAP_IRQ
++ select REGMAP_I2C
+ help
+ If you say yes here you will get support for the
+ RTC of Samsung S2MPS14 and S5M PMIC series.
+@@ -1300,7 +1301,7 @@ config RTC_DRV_OPAL
+
+ config RTC_DRV_ZYNQMP
+ tristate "Xilinx Zynq Ultrascale+ MPSoC RTC"
+- depends on OF
++ depends on OF && HAS_IOMEM
+ help
+ If you say yes here you get support for the RTC controller found on
+ Xilinx Zynq Ultrascale+ MPSoC.
+diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
+index a7b671a210223..79161d4c6ce4d 100644
+--- a/drivers/rtc/rtc-rx6110.c
++++ b/drivers/rtc/rtc-rx6110.c
+@@ -331,7 +331,7 @@ static int rx6110_probe(struct rx6110_data *rx6110, struct device *dev)
+ return 0;
+ }
+
+-#ifdef CONFIG_SPI_MASTER
++#if IS_ENABLED(CONFIG_SPI_MASTER)
+ static struct regmap_config regmap_spi_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+@@ -411,7 +411,7 @@ static void rx6110_spi_unregister(void)
+ }
+ #endif /* CONFIG_SPI_MASTER */
+
+-#ifdef CONFIG_I2C
++#if IS_ENABLED(CONFIG_I2C)
+ static struct regmap_config regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 10206e4498d07..52eaf51c9bb64 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -1438,6 +1438,8 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ if (rc) {
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+ return rc;
+@@ -1481,6 +1483,8 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ if (rc) {
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+ return rc;
+@@ -1524,6 +1528,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ if (rc)
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcRB.status);
+@@ -1568,6 +1574,8 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ if (rc)
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+@@ -1744,6 +1752,8 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ if (rc)
+ return rc;
+ return put_user(mex64.outputdatalength,
+@@ -1795,6 +1805,8 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ if (rc)
+ return rc;
+ return put_user(crt64.outputdatalength,
+@@ -1865,6 +1877,8 @@ static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
+ if (rc == -EAGAIN)
+ tr.again_counter++;
+ } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
++ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
++ rc = -EIO;
+ xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
+ xcRB32.reply_data_length = xcRB64.reply_data_length;
+ xcRB32.status = xcRB64.status;
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 5730572b52cd5..54e686dca6dea 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -117,7 +117,7 @@ struct virtio_rev_info {
+ };
+
+ /* the highest virtio-ccw revision we support */
+-#define VIRTIO_CCW_REV_MAX 1
++#define VIRTIO_CCW_REV_MAX 2
+
+ struct virtio_ccw_vq_info {
+ struct virtqueue *vq;
+@@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+ u8 old_status = vcdev->dma_area->status;
+ struct ccw1 *ccw;
+
+- if (vcdev->revision < 1)
++ if (vcdev->revision < 2)
+ return vcdev->dma_area->status;
+
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
+index 13677973da5cf..770546177ca46 100644
+--- a/drivers/scsi/aic94xx/aic94xx_scb.c
++++ b/drivers/scsi/aic94xx/aic94xx_scb.c
+@@ -68,7 +68,6 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ struct done_list_struct *dl)
+ {
+ struct asd_ha_struct *asd_ha = ascb->ha;
+- struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+ int phy_id = dl->status_block[0] & DL_PHY_MASK;
+ struct asd_phy *phy = &asd_ha->phys[phy_id];
+
+@@ -81,7 +80,7 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
+ asd_turn_led(asd_ha, phy_id, 0);
+ sas_phy_disconnected(&phy->sas_phy);
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ break;
+ case CURRENT_OOB_DONE:
+ /* hot plugged device */
+@@ -89,12 +88,12 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ get_lrate_mode(phy, oob_mode);
+ ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
+ phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ break;
+ case CURRENT_SPINUP_HOLD:
+ /* hot plug SATA, no COMWAKE sent */
+ asd_turn_led(asd_ha, phy_id, 1);
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ break;
+ case CURRENT_GTO_TIMEOUT:
+ case CURRENT_OOB_ERROR:
+@@ -102,7 +101,7 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
+ dl->status_block[1]);
+ asd_turn_led(asd_ha, phy_id, 0);
+ sas_phy_disconnected(&phy->sas_phy);
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ break;
+ }
+ }
+@@ -222,7 +221,6 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
+ int edb_el = edb_id + ascb->edb_index;
+ struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
+ struct asd_phy *phy = &ascb->ha->phys[phy_id];
+- struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
+ u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
+
+ size = min(size, (u16) sizeof(phy->frame_rcvd));
+@@ -234,7 +232,7 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ asd_dump_frame_rcvd(phy, dl);
+ asd_form_port(ascb->ha, phy);
+- sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
++ sas_notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
+ }
+
+ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
+@@ -270,7 +268,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
+ asd_turn_led(asd_ha, phy_id, 0);
+ sas_phy_disconnected(sas_phy);
+ asd_deform_port(asd_ha, phy);
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+
+ if (retries_left == 0) {
+ int num = 1;
+@@ -315,7 +313,7 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = ffs(cont);
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+
+ case LmUNKNOWNP:
+@@ -336,7 +334,7 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
+ /* The sequencer disables all phys on that port.
+ * We have to re-enable the phys ourselves. */
+ asd_deform_port(asd_ha, phy);
+- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++ sas_notify_port_event(sas_phy, PORTE_HARD_RESET);
+ break;
+
+ default:
+@@ -567,7 +565,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
+ /* the device is gone */
+ sas_phy_disconnected(sas_phy);
+ asd_deform_port(asd_ha, phy);
+- sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
++ sas_notify_port_event(sas_phy, PORTE_TIMER_EVENT);
+ break;
+ default:
+ ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
+diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
+index 3cf7e08df8093..ecdc0f0f4f4e6 100644
+--- a/drivers/scsi/bnx2fc/Kconfig
++++ b/drivers/scsi/bnx2fc/Kconfig
+@@ -5,6 +5,7 @@ config SCSI_BNX2X_FCOE
+ depends on (IPV6 || IPV6=n)
+ depends on LIBFC
+ depends on LIBFCOE
++ depends on MMU
+ select NETDEVICES
+ select ETHERNET
+ select NET_VENDOR_BROADCOM
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index cf0bfac920a81..76f8fc3fad599 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -616,7 +616,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+ {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+- struct sas_ha_struct *sas_ha;
+
+ if (!phy->phy_attached)
+ return;
+@@ -627,8 +626,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+ return;
+ }
+
+- sas_ha = &hisi_hba->sha;
+- sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event(sas_phy, PHYE_OOB_DONE);
+
+ if (sas_phy->phy) {
+ struct sas_phy *sphy = sas_phy->phy;
+@@ -656,7 +654,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+ }
+
+ sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+- sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
++ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ }
+
+ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
+@@ -1411,7 +1409,6 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
+
+ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
+ {
+- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ struct asd_sas_port *_sas_port = NULL;
+ int phy_no;
+
+@@ -1432,7 +1429,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
+ _sas_port = sas_port;
+
+ if (dev_is_expander(dev->dev_type))
+- sas_ha->notify_port_event(sas_phy,
++ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD);
+ }
+ } else {
+@@ -2194,7 +2191,6 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
+ {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ struct device *dev = hisi_hba->dev;
+
+ if (rdy) {
+@@ -2210,7 +2206,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
+ return;
+ }
+ /* Phy down and not ready */
+- sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
++ sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+ sas_phy_disconnected(sas_phy);
+
+ if (port) {
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index 45e866cb9164d..22eecc89d41bd 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1408,7 +1408,6 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
+ struct hisi_sas_phy *phy = p;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+- struct sas_ha_struct *sha = &hisi_hba->sha;
+ struct device *dev = hisi_hba->dev;
+ int phy_no = sas_phy->id;
+ u32 irq_value;
+@@ -1424,7 +1423,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
+ }
+
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+- sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+
+ end:
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index 9adfdefef9cad..10ba0680da04b 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2818,14 +2818,13 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+ {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_RX_BCST_ACK_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 7c12804b4e1d1..9d9dcc11a866b 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -1600,14 +1600,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+ {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ u32 bcast_status;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+ if ((bcast_status & RX_BCAST_CHG_MSK) &&
+ !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_RX_BCST_ACK_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
+index 1df45f028ea75..e50c3b0deeb30 100644
+--- a/drivers/scsi/isci/port.c
++++ b/drivers/scsi/isci/port.c
+@@ -164,7 +164,8 @@ static void isci_port_bc_change_received(struct isci_host *ihost,
+ "%s: isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+
+- ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event_gfp(&iphy->sas_phy,
++ PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+ sci_port_bcn_enable(iport);
+ }
+
+@@ -223,8 +224,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
+ /* Notify libsas that we have an address frame, if indeed
+ * we've found an SSP, SMP, or STP target */
+ if (success)
+- isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+- PORTE_BYTES_DMAED);
++ sas_notify_port_event_gfp(&iphy->sas_phy,
++ PORTE_BYTES_DMAED, GFP_ATOMIC);
+ }
+
+
+@@ -270,8 +271,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
+ * isci_port_deformed and isci_dev_gone functions.
+ */
+ sas_phy_disconnected(&isci_phy->sas_phy);
+- isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+- PHYE_LOSS_OF_SIGNAL);
++ sas_notify_phy_event_gfp(&isci_phy->sas_phy,
++ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p - Done\n", __func__, isci_port);
+diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
+index a1852f6c042b9..ba266a17250ae 100644
+--- a/drivers/scsi/libsas/sas_event.c
++++ b/drivers/scsi/libsas/sas_event.c
+@@ -109,7 +109,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
+
+ sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ port_phy_el);
+- ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ }
+ mutex_unlock(&ha->disco_mutex);
+ }
+@@ -131,18 +131,15 @@ static void sas_phy_event_worker(struct work_struct *work)
+ sas_free_event(ev);
+ }
+
+-static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
++static int __sas_notify_port_event(struct asd_sas_phy *phy,
++ enum port_event event,
++ struct asd_sas_event *ev)
+ {
+- struct asd_sas_event *ev;
+ struct sas_ha_struct *ha = phy->ha;
+ int ret;
+
+ BUG_ON(event >= PORT_NUM_EVENTS);
+
+- ev = sas_alloc_event(phy);
+- if (!ev)
+- return -ENOMEM;
+-
+ INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+@@ -152,18 +149,40 @@ static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+ return ret;
+ }
+
+-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
++int sas_notify_port_event_gfp(struct asd_sas_phy *phy, enum port_event event,
++ gfp_t gfp_flags)
+ {
+ struct asd_sas_event *ev;
+- struct sas_ha_struct *ha = phy->ha;
+- int ret;
+
+- BUG_ON(event >= PHY_NUM_EVENTS);
++ ev = sas_alloc_event_gfp(phy, gfp_flags);
++ if (!ev)
++ return -ENOMEM;
++
++ return __sas_notify_port_event(phy, event, ev);
++}
++EXPORT_SYMBOL_GPL(sas_notify_port_event_gfp);
++
++int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
++{
++ struct asd_sas_event *ev;
+
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
++ return __sas_notify_port_event(phy, event, ev);
++}
++EXPORT_SYMBOL_GPL(sas_notify_port_event);
++
++static inline int __sas_notify_phy_event(struct asd_sas_phy *phy,
++ enum phy_event event,
++ struct asd_sas_event *ev)
++{
++ struct sas_ha_struct *ha = phy->ha;
++ int ret;
++
++ BUG_ON(event >= PHY_NUM_EVENTS);
++
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+@@ -173,10 +192,27 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+ return ret;
+ }
+
+-int sas_init_events(struct sas_ha_struct *sas_ha)
++int sas_notify_phy_event_gfp(struct asd_sas_phy *phy, enum phy_event event,
++ gfp_t gfp_flags)
+ {
+- sas_ha->notify_port_event = sas_notify_port_event;
+- sas_ha->notify_phy_event = sas_notify_phy_event;
++ struct asd_sas_event *ev;
+
+- return 0;
++ ev = sas_alloc_event_gfp(phy, gfp_flags);
++ if (!ev)
++ return -ENOMEM;
++
++ return __sas_notify_phy_event(phy, event, ev);
++}
++EXPORT_SYMBOL_GPL(sas_notify_phy_event_gfp);
++
++int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
++{
++ struct asd_sas_event *ev;
++
++ ev = sas_alloc_event(phy);
++ if (!ev)
++ return -ENOMEM;
++
++ return __sas_notify_phy_event(phy, event, ev);
+ }
++EXPORT_SYMBOL_GPL(sas_notify_phy_event);
+diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
+index 21c43b18d5d5b..f8ae1f0f17d36 100644
+--- a/drivers/scsi/libsas/sas_init.c
++++ b/drivers/scsi/libsas/sas_init.c
+@@ -123,12 +123,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
+ goto Undo_phys;
+ }
+
+- error = sas_init_events(sas_ha);
+- if (error) {
+- pr_notice("couldn't start event thread:%d\n", error);
+- goto Undo_ports;
+- }
+-
+ error = -ENOMEM;
+ snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ sas_ha->event_q = create_singlethread_workqueue(name);
+@@ -590,16 +584,15 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
+ }
+ EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+-
+-struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
++static struct asd_sas_event *__sas_alloc_event(struct asd_sas_phy *phy,
++ gfp_t gfp_flags)
+ {
+ struct asd_sas_event *event;
+- gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+- event = kmem_cache_zalloc(sas_event_cache, flags);
++ event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
+ if (!event)
+ return NULL;
+
+@@ -610,7 +603,8 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+ if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ pr_notice("The phy%d bursting events, shut it down.\n",
+ phy->id);
+- sas_notify_phy_event(phy, PHYE_SHUTDOWN);
++ sas_notify_phy_event_gfp(phy, PHYE_SHUTDOWN,
++ gfp_flags);
+ }
+ } else {
+ /* Do not support PHY control, stop allocating events */
+@@ -624,6 +618,17 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+ return event;
+ }
+
++struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
++{
++ return __sas_alloc_event(phy, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
++}
++
++struct asd_sas_event *sas_alloc_event_gfp(struct asd_sas_phy *phy,
++ gfp_t gfp_flags)
++{
++ return __sas_alloc_event(phy, gfp_flags);
++}
++
+ void sas_free_event(struct asd_sas_event *event)
+ {
+ struct asd_sas_phy *phy = event->phy;
+diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
+index 1f1d01901978c..52e09c3e2b50d 100644
+--- a/drivers/scsi/libsas/sas_internal.h
++++ b/drivers/scsi/libsas/sas_internal.h
+@@ -49,12 +49,13 @@ int sas_register_phys(struct sas_ha_struct *sas_ha);
+ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+
+ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
++struct asd_sas_event *sas_alloc_event_gfp(struct asd_sas_phy *phy,
++ gfp_t gfp_flags);
+ void sas_free_event(struct asd_sas_event *event);
+
+ int sas_register_ports(struct sas_ha_struct *sas_ha);
+ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
+
+-int sas_init_events(struct sas_ha_struct *sas_ha);
+ void sas_disable_revalidation(struct sas_ha_struct *ha);
+ void sas_enable_revalidation(struct sas_ha_struct *ha);
+ void __sas_drain_work(struct sas_ha_struct *ha);
+@@ -78,6 +79,8 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
+ int sas_smp_get_phy_events(struct sas_phy *phy);
+
+ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
++int sas_notify_phy_event_gfp(struct asd_sas_phy *phy, enum phy_event event,
++ gfp_t flags);
+ void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
+ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
+ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 2b6b5fc671feb..e5ace4a4f432a 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -1145,13 +1145,14 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ struct lpfc_vport *vport = pmb->vport;
+ LPFC_MBOXQ_t *sparam_mb;
+ struct lpfc_dmabuf *sparam_mp;
++ u16 status = pmb->u.mb.mbxStatus;
+ int rc;
+
+- if (pmb->u.mb.mbxStatus)
+- goto out;
+-
+ mempool_free(pmb, phba->mbox_mem_pool);
+
++ if (status)
++ goto out;
++
+ /* don't perform discovery for SLI4 loopback diagnostic test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->hba_flag & HBA_FCOE_MODE) &&
+@@ -1214,12 +1215,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+
+ out:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+- "0306 CONFIG_LINK mbxStatus error x%x "
+- "HBA state x%x\n",
+- pmb->u.mb.mbxStatus, vport->port_state);
+-sparam_out:
+- mempool_free(pmb, phba->mbox_mem_pool);
++ "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
++ status, vport->port_state);
+
++sparam_out:
+ lpfc_linkdown(phba);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index a920eced92ecc..484e01428da28 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -216,11 +216,11 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
+ }
+
+-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
++static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags)
+ {
+ struct mvs_phy *phy = &mvi->phy[i];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+- struct sas_ha_struct *sas_ha;
++
+ if (!phy->phy_attached)
+ return;
+
+@@ -229,8 +229,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+ return;
+ }
+
+- sas_ha = mvi->sas;
+- sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event_gfp(sas_phy, PHYE_OOB_DONE, gfp_flags);
+
+ if (sas_phy->phy) {
+ struct sas_phy *sphy = sas_phy->phy;
+@@ -262,8 +261,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+
+ sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+
+- mvi->sas->notify_port_event(sas_phy,
+- PORTE_BYTES_DMAED);
++ sas_notify_port_event_gfp(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
+ }
+
+ void mvs_scan_start(struct Scsi_Host *shost)
+@@ -279,7 +277,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
+ for (j = 0; j < core_nr; j++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+ for (i = 0; i < mvi->chip->n_phy; ++i)
+- mvs_bytes_dmaed(mvi, i);
++ mvs_bytes_dmaed(mvi, i, GFP_KERNEL);
+ }
+ mvs_prv->scan_finished = 1;
+ }
+@@ -1880,7 +1878,6 @@ static void mvs_work_queue(struct work_struct *work)
+ struct mvs_info *mvi = mwq->mvi;
+ unsigned long flags;
+ u32 phy_no = (unsigned long) mwq->data;
+- struct sas_ha_struct *sas_ha = mvi->sas;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+@@ -1895,21 +1892,21 @@ static void mvs_work_queue(struct work_struct *work)
+ if (!(tmp & PHY_READY_MASK)) {
+ sas_phy_disconnected(sas_phy);
+ mvs_phy_disconnected(phy);
+- sas_ha->notify_phy_event(sas_phy,
+- PHYE_LOSS_OF_SIGNAL);
++ sas_notify_phy_event_gfp(sas_phy,
++ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
+ mv_dprintk("phy%d Removed Device\n", phy_no);
+ } else {
+ MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ mvs_update_phyinfo(mvi, phy_no, 1);
+- mvs_bytes_dmaed(mvi, phy_no);
++ mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
+ mvs_port_notify_formed(sas_phy, 0);
+ mv_dprintk("phy%d Attached Device\n", phy_no);
+ }
+ }
+ } else if (mwq->handler & EXP_BRCT_CHG) {
+ phy->phy_event &= ~EXP_BRCT_CHG;
+- sas_ha->notify_port_event(sas_phy,
+- PORTE_BROADCAST_RCVD);
++ sas_notify_port_event_gfp(sas_phy,
++ PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+ mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
+ }
+ list_del(&mwq->entry);
+@@ -2026,7 +2023,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+ mdelay(10);
+ }
+
+- mvs_bytes_dmaed(mvi, phy_no);
++ mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
+ /* whether driver is going to handle hot plug */
+ if (phy->phy_event & PHY_PLUG_OUT) {
+ mvs_port_notify_formed(&phy->sas_phy, 0);
+diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
+index c8d4d87c54737..dd15246d5b037 100644
+--- a/drivers/scsi/pm8001/pm8001_hwi.c
++++ b/drivers/scsi/pm8001/pm8001_hwi.c
+@@ -3179,7 +3179,7 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+ pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i);
+
+ sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+- pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
++ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ }
+
+ /* Get the link rate speed */
+@@ -3293,7 +3293,6 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ u8 deviceType = pPayload->sas_identify.dev_type;
+@@ -3337,7 +3336,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+ phy->sas_phy.oob_mode = SAS_OOB_MODE;
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+ sizeof(struct sas_identify_frame)-4);
+@@ -3369,7 +3368,6 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
+@@ -3381,7 +3379,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->sas_phy.oob_mode = SATA_OOB_MODE;
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ sizeof(struct dev_to_host_fis));
+@@ -3728,11 +3726,11 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ break;
+ case HW_EVENT_SATA_SPINUP_HOLD:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ break;
+ case HW_EVENT_PHY_DOWN:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ phy->phy_state = 0;
+ hw_event_phy_down(pm8001_ha, piomb);
+@@ -3741,7 +3739,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ /* the broadcast change primitive received, tell the LIBSAS this event
+ to revalidate the sas domain*/
+@@ -3752,20 +3750,20 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_PHY_ERROR:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ sas_phy_disconnected(&phy->sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ break;
+ case HW_EVENT_BROADCAST_EXP:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_LINK_ERR_INVALID_DWORD:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3774,7 +3772,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3784,7 +3782,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3794,7 +3792,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3804,7 +3802,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_MALFUNCTION:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
+@@ -3814,7 +3812,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_INBOUND_CRC_ERROR:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+@@ -3824,13 +3822,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ break;
+ case HW_EVENT_HARD_RESET_RECEIVED:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++ sas_notify_port_event(sas_phy, PORTE_HARD_RESET);
+ break;
+ case HW_EVENT_ID_FRAME_TIMEOUT:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3840,20 +3838,20 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RESET_TIMER_TMO:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+ pm8001_dbg(pm8001_ha, MSG,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RECOVER:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index d1e9dba2ef193..e21c6cfff4cbd 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -158,7 +158,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ int rc = 0, phy_id = sas_phy->id;
+ struct pm8001_hba_info *pm8001_ha = NULL;
+ struct sas_phy_linkrates *rates;
+- struct sas_ha_struct *sas_ha;
+ struct pm8001_phy *phy;
+ DECLARE_COMPLETION_ONSTACK(completion);
+ unsigned long flags;
+@@ -207,18 +206,16 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+ if (pm8001_ha->chip_id != chip_8001) {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPCV) {
+- sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+- sas_ha->notify_phy_event(&phy->sas_phy,
++ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ } else {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPC) {
+- sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+- sas_ha->notify_phy_event(&phy->sas_phy,
++ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
+index 6772b0924dac8..f617177b7bb33 100644
+--- a/drivers/scsi/pm8001/pm80xx_hwi.c
++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
+@@ -3243,7 +3243,6 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ u8 deviceType = pPayload->sas_identify.dev_type;
+@@ -3288,7 +3287,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+ phy->sas_phy.oob_mode = SAS_OOB_MODE;
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+ sizeof(struct sas_identify_frame)-4);
+@@ -3322,7 +3321,6 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ pm8001_dbg(pm8001_ha, DEVIO,
+@@ -3336,7 +3334,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->sas_phy.oob_mode = SATA_OOB_MODE;
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ sizeof(struct dev_to_host_fis));
+@@ -3418,11 +3416,8 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ break;
+
+ }
+- if (port_sata && (portstate != PORT_IN_RESET)) {
+- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+-
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+- }
++ if (port_sata && (portstate != PORT_IN_RESET))
++ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ }
+
+ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+@@ -3520,7 +3515,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ break;
+ case HW_EVENT_SATA_SPINUP_HOLD:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ break;
+ case HW_EVENT_PHY_DOWN:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
+@@ -3536,7 +3531,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ /* the broadcast change primitive received, tell the LIBSAS this event
+ to revalidate the sas domain*/
+@@ -3547,20 +3542,20 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_PHY_ERROR:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ sas_phy_disconnected(&phy->sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
++ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ break;
+ case HW_EVENT_BROADCAST_EXP:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_LINK_ERR_INVALID_DWORD:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3597,7 +3592,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+ sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+ spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
++ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ break;
+ case HW_EVENT_INBOUND_CRC_ERROR:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+@@ -3607,13 +3602,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ break;
+ case HW_EVENT_HARD_RESET_RECEIVED:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
++ sas_notify_port_event(sas_phy, PORTE_HARD_RESET);
+ break;
+ case HW_EVENT_ID_FRAME_TIMEOUT:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+ pm8001_dbg(pm8001_ha, MSG,
+@@ -3623,7 +3618,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ break;
+ case HW_EVENT_PORT_RESET_TIMER_TMO:
+ pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+@@ -3631,7 +3626,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ port_id, phy_id, 0, 0);
+ sas_phy_disconnected(sas_phy);
+ phy->phy_attached = 0;
+- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
++ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ if (pm8001_ha->phy[phy_id].reset_completion) {
+ pm8001_ha->phy[phy_id].port_reset_status =
+ PORT_RESET_TMO;
+@@ -3648,7 +3643,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ if (port->wide_port_phymap & (1 << i)) {
+ phy = &pm8001_ha->phy[i];
+- sas_ha->notify_phy_event(&phy->sas_phy,
++ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ port->wide_port_phymap &= ~(1 << i);
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index bb7431912d410..144a893e7335b 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -202,6 +202,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
+ wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
++ wrt_reg_word(&reg->mailbox10, 0);
+
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index d7d4ab65009c4..510cbe2bf1e5b 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -4276,7 +4276,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
+ if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
+ mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
+ mcp->mb[8] = MSW(addr);
+- mcp->out_mb = MBX_8|MBX_0;
++ mcp->mb[10] = 0;
++ mcp->out_mb = MBX_10|MBX_8|MBX_0;
+ } else {
+ mcp->mb[0] = MBC_DUMP_RISC_RAM;
+ mcp->out_mb = MBX_0;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index a3d2d4bc4a3dc..6a3a163b07065 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -707,9 +707,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
+ put_unaligned_be16(spsp, &cdb[2]);
+ put_unaligned_be32(len, &cdb[6]);
+
+- ret = scsi_execute_req(sdev, cdb,
+- send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+- buffer, len, NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
++ ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
++ buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
++ RQF_PM, NULL);
+ return ret <= 0 ? ret : -EIO;
+ }
+ #endif /* CONFIG_BLK_SED_OPAL */
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index cf07b7f935790..87a7274e4632b 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -688,6 +688,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+ unsigned int nr_zones = sdkp->rev_nr_zones;
+ u32 max_append;
+ int ret = 0;
++ unsigned int flags;
+
+ /*
+ * For all zoned disks, initialize zone append emulation data if not
+@@ -720,16 +721,19 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
+ disk->queue->nr_zones == nr_zones)
+ goto unlock;
+
++ flags = memalloc_noio_save();
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
+- sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
++ sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
+ if (!sdkp->rev_wp_offset) {
+ ret = -ENOMEM;
++ memalloc_noio_restore(flags);
+ goto unlock;
+ }
+
+ ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);
+
++ memalloc_noio_restore(flags);
+ kvfree(sdkp->rev_wp_offset);
+ sdkp->rev_wp_offset = NULL;
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index fb32d122f2e38..728168cd18f55 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -94,6 +94,8 @@
+ 16, 4, buf, __len, false); \
+ } while (0)
+
++static bool early_suspend;
++
+ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+ const char *prefix)
+ {
+@@ -8939,8 +8941,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
+ int ret = 0;
+ ktime_t start = ktime_get();
+
++ if (!hba) {
++ early_suspend = true;
++ return 0;
++ }
++
+ down(&hba->eh_sem);
+- if (!hba || !hba->is_powered)
++
++ if (!hba->is_powered)
+ return 0;
+
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+@@ -8989,9 +8997,12 @@ int ufshcd_system_resume(struct ufs_hba *hba)
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+- if (!hba) {
+- up(&hba->eh_sem);
++ if (!hba)
+ return -EINVAL;
++
++ if (unlikely(early_suspend)) {
++ early_suspend = false;
++ down(&hba->eh_sem);
+ }
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 682ba0eb4eba1..20acac6342eff 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/bitops.h>
++ #include <linux/clk.h>
+ #include <linux/interrupt.h>
+ #include <linux/fs.h>
+ #include <linux/kfifo.h>
+@@ -67,6 +68,7 @@ struct aspeed_lpc_snoop_channel {
+ struct aspeed_lpc_snoop {
+ struct regmap *regmap;
+ int irq;
++ struct clk *clk;
+ struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+ };
+
+@@ -282,22 +284,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
++ lpc_snoop->clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(lpc_snoop->clk)) {
++ rc = PTR_ERR(lpc_snoop->clk);
++ if (rc != -EPROBE_DEFER)
++ dev_err(dev, "couldn't get clock\n");
++ return rc;
++ }
++ rc = clk_prepare_enable(lpc_snoop->clk);
++ if (rc) {
++ dev_err(dev, "couldn't enable clock\n");
++ return rc;
++ }
++
+ rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+ if (rc)
+- return rc;
++ goto err;
+
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
+ if (rc)
+- return rc;
++ goto err;
+
+ /* Configuration of 2nd snoop channel port is optional */
+ if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+ 1, &port) == 0) {
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
+- if (rc)
++ if (rc) {
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
++ goto err;
++ }
+ }
+
++ return 0;
++
++err:
++ clk_disable_unprepare(lpc_snoop->clk);
++
+ return rc;
+ }
+
+@@ -309,6 +331,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ aspeed_lpc_disable_snoop(lpc_snoop, 1);
+
++ clk_disable_unprepare(lpc_snoop->clk);
++
+ return 0;
+ }
+
+diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
+index 773930e0cb100..e3215f826d17a 100644
+--- a/drivers/soc/aspeed/aspeed-socinfo.c
++++ b/drivers/soc/aspeed/aspeed-socinfo.c
+@@ -25,6 +25,7 @@ static struct {
+ /* AST2600 */
+ { "AST2600", 0x05000303 },
+ { "AST2620", 0x05010203 },
++ { "AST2605", 0x05030103 },
+ };
+
+ static const char *siliconid_to_name(u32 siliconid)
+@@ -43,14 +44,30 @@ static const char *siliconid_to_name(u32 siliconid)
+ static const char *siliconid_to_rev(u32 siliconid)
+ {
+ unsigned int rev = (siliconid >> 16) & 0xff;
+-
+- switch (rev) {
+- case 0:
+- return "A0";
+- case 1:
+- return "A1";
+- case 3:
+- return "A2";
++ unsigned int gen = (siliconid >> 24) & 0xff;
++
++ if (gen < 0x5) {
++ /* AST2500 and below */
++ switch (rev) {
++ case 0:
++ return "A0";
++ case 1:
++ return "A1";
++ case 3:
++ return "A2";
++ }
++ } else {
++ /* AST2600 */
++ switch (rev) {
++ case 0:
++ return "A0";
++ case 1:
++ return "A1";
++ case 2:
++ return "A2";
++ case 3:
++ return "A3";
++ }
+ }
+
+ return "??";
+diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
+index 7f9e9944d1eae..f1875dc31ae2c 100644
+--- a/drivers/soc/qcom/ocmem.c
++++ b/drivers/soc/qcom/ocmem.c
+@@ -189,6 +189,7 @@ struct ocmem *of_get_ocmem(struct device *dev)
+ {
+ struct platform_device *pdev;
+ struct device_node *devnode;
++ struct ocmem *ocmem;
+
+ devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ if (!devnode || !devnode->parent) {
+@@ -202,7 +203,12 @@ struct ocmem *of_get_ocmem(struct device *dev)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+- return platform_get_drvdata(pdev);
++ ocmem = platform_get_drvdata(pdev);
++ if (!ocmem) {
++ dev_err(dev, "Cannot get ocmem\n");
++ return ERR_PTR(-ENODEV);
++ }
++ return ocmem;
+ }
+ EXPORT_SYMBOL(of_get_ocmem);
+
+diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
+index d21530d24253e..6daa3c5771d16 100644
+--- a/drivers/soc/qcom/socinfo.c
++++ b/drivers/soc/qcom/socinfo.c
+@@ -286,7 +286,7 @@ static int qcom_show_pmic_model(struct seq_file *seq, void *p)
+ if (model < 0)
+ return -EINVAL;
+
+- if (model <= ARRAY_SIZE(pmic_models) && pmic_models[model])
++ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
+diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
+index 8abf4dfaa5c59..5daeadc363829 100644
+--- a/drivers/soc/samsung/exynos-asv.c
++++ b/drivers/soc/samsung/exynos-asv.c
+@@ -119,11 +119,6 @@ static int exynos_asv_probe(struct platform_device *pdev)
+ u32 product_id = 0;
+ int ret, i;
+
+- cpu_dev = get_cpu_device(0);
+- ret = dev_pm_opp_get_opp_count(cpu_dev);
+- if (ret < 0)
+- return -EPROBE_DEFER;
+-
+ asv = devm_kzalloc(&pdev->dev, sizeof(*asv), GFP_KERNEL);
+ if (!asv)
+ return -ENOMEM;
+@@ -134,7 +129,13 @@ static int exynos_asv_probe(struct platform_device *pdev)
+ return PTR_ERR(asv->chipid_regmap);
+ }
+
+- regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
++ ret = regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID,
++ &product_id);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot read revision from ChipID: %d\n",
++ ret);
++ return -ENODEV;
++ }
+
+ switch (product_id & EXYNOS_MASK) {
+ case 0xE5422000:
+@@ -144,6 +145,11 @@ static int exynos_asv_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
++ cpu_dev = get_cpu_device(0);
++ ret = dev_pm_opp_get_opp_count(cpu_dev);
++ if (ret < 0)
++ return -EPROBE_DEFER;
++
+ ret = of_property_read_u32(pdev->dev.of_node, "samsung,asv-bin",
+ &asv->of_bin);
+ if (ret < 0)
+diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
+index 64f3e31055401..7bab4bbaf02dc 100644
+--- a/drivers/soc/ti/pm33xx.c
++++ b/drivers/soc/ti/pm33xx.c
+@@ -535,7 +535,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
+
+ ret = am33xx_push_sram_idle();
+ if (ret)
+- goto err_free_sram;
++ goto err_unsetup_rtc;
+
+ am33xx_pm_set_ipc_ops();
+
+@@ -575,6 +575,9 @@ err_pm_runtime_put:
+ err_pm_runtime_disable:
+ pm_runtime_disable(dev);
+ wkup_m3_ipc_put(m3_ipc);
++err_unsetup_rtc:
++ iounmap(rtc_base_virt);
++ clk_put(rtc_fck);
+ err_free_sram:
+ am33xx_pm_free_sram();
+ pm33xx_dev = NULL;
+diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
+index d1e8c3a54976b..662b3b0302467 100644
+--- a/drivers/soundwire/bus.c
++++ b/drivers/soundwire/bus.c
+@@ -405,10 +405,11 @@ sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ return sdw_transfer(slave->bus, &msg);
+ }
+
+-static int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
++int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+ {
+ return sdw_nwrite_no_pm(slave, addr, 1, &value);
+ }
++EXPORT_SYMBOL(sdw_write_no_pm);
+
+ static int
+ sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
+@@ -476,8 +477,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
+ }
+ EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
+
+-static int
+-sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
++int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+ {
+ u8 buf;
+ int ret;
+@@ -488,6 +488,19 @@ sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+ else
+ return buf;
+ }
++EXPORT_SYMBOL(sdw_read_no_pm);
++
++static int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
++{
++ int tmp;
++
++ tmp = sdw_read_no_pm(slave, addr);
++ if (tmp < 0)
++ return tmp;
++
++ tmp = (tmp & ~mask) | val;
++ return sdw_write_no_pm(slave, addr, tmp);
++}
+
+ /**
+ * sdw_nread() - Read "n" contiguous SDW Slave registers
+@@ -500,16 +513,16 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ {
+ int ret;
+
+- ret = pm_runtime_get_sync(slave->bus->dev);
++ ret = pm_runtime_get_sync(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+- pm_runtime_put_noidle(slave->bus->dev);
++ pm_runtime_put_noidle(&slave->dev);
+ return ret;
+ }
+
+ ret = sdw_nread_no_pm(slave, addr, count, val);
+
+- pm_runtime_mark_last_busy(slave->bus->dev);
+- pm_runtime_put(slave->bus->dev);
++ pm_runtime_mark_last_busy(&slave->dev);
++ pm_runtime_put(&slave->dev);
+
+ return ret;
+ }
+@@ -526,16 +539,16 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ {
+ int ret;
+
+- ret = pm_runtime_get_sync(slave->bus->dev);
++ ret = pm_runtime_get_sync(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+- pm_runtime_put_noidle(slave->bus->dev);
++ pm_runtime_put_noidle(&slave->dev);
+ return ret;
+ }
+
+ ret = sdw_nwrite_no_pm(slave, addr, count, val);
+
+- pm_runtime_mark_last_busy(slave->bus->dev);
+- pm_runtime_put(slave->bus->dev);
++ pm_runtime_mark_last_busy(&slave->dev);
++ pm_runtime_put(&slave->dev);
+
+ return ret;
+ }
+@@ -1210,7 +1223,7 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
+ }
+ scale_index++;
+
+- ret = sdw_write(slave, SDW_SCP_BUS_CLOCK_BASE, base);
++ ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
+@@ -1218,13 +1231,13 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
+ }
+
+ /* initialize scale for both banks */
+- ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
++ ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
+ if (ret < 0) {
+ dev_err(&slave->dev,
+ "SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
+ return ret;
+ }
+- ret = sdw_write(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
++ ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
+ if (ret < 0)
+ dev_err(&slave->dev,
+ "SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
+@@ -1256,7 +1269,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
+ val = slave->prop.scp_int1_mask;
+
+ /* Enable SCP interrupts */
+- ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
++ ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
+ if (ret < 0) {
+ dev_err(slave->bus->dev,
+ "SDW_SCP_INTMASK1 write failed:%d\n", ret);
+@@ -1271,7 +1284,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
+ val = prop->dp0_prop->imp_def_interrupts;
+ val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
+
+- ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
++ ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
+ if (ret < 0)
+ dev_err(slave->bus->dev,
+ "SDW_DP0_INTMASK read failed:%d\n", ret);
+@@ -1440,7 +1453,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
+ ret = pm_runtime_get_sync(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
+- pm_runtime_put_noidle(slave->bus->dev);
++ pm_runtime_put_noidle(&slave->dev);
+ return ret;
+ }
+
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 9fa55164354a2..580660599f461 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -484,10 +484,10 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
+ if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+ no_ack = 1;
+ dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n");
+- if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+- nack = 1;
+- dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
+- }
++ }
++ if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
++ nack = 1;
++ dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
+ }
+ }
+
+diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
+index cabdadb09a1bb..bc8520eb385ec 100644
+--- a/drivers/soundwire/intel_init.c
++++ b/drivers/soundwire/intel_init.c
+@@ -405,11 +405,12 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+ {
+ acpi_status status;
+
++ info->handle = NULL;
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ parent_handle, 1,
+ sdw_intel_acpi_cb,
+ NULL, info, NULL);
+- if (ACPI_FAILURE(status))
++ if (ACPI_FAILURE(status) || info->handle == NULL)
+ return -ENODEV;
+
+ return sdw_intel_scan_controller(info);
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 948396b382d73..f429436082afa 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1590,7 +1590,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
+ if (ret == 0) {
+ as->use_dma = true;
+ } else if (ret == -EPROBE_DEFER) {
+- return ret;
++ goto out_unmap_regs;
+ }
+ } else if (as->caps.has_pdc_support) {
+ as->use_pdc = true;
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index ba7d40c2922f7..826b01f346246 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -461,7 +461,7 @@ static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
+ /* Setup dummy clock cycles */
+ dummy_clk = op->dummy.nbytes * 8;
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+- dummy_clk = CQSPI_DUMMY_CLKS_MAX;
++ return -EOPNOTSUPP;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
+index 4aa8596fb1f2b..5be6b7b80c21b 100644
+--- a/drivers/spi/spi-dw-bt1.c
++++ b/drivers/spi/spi-dw-bt1.c
+@@ -84,7 +84,7 @@ static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t
+ if (shift) {
+ chunk = min_t(size_t, 4 - shift, len);
+ data = readl_relaxed(from - shift);
+- memcpy(to, &data + shift, chunk);
++ memcpy(to, (char *)&data + shift, chunk);
+ from += chunk;
+ to += chunk;
+ len -= chunk;
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 6d8e0a05a5355..e4a8d203f9408 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -695,7 +695,7 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+
+ if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
+ return;
+- iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
++ iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
+ }
+ }
+
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 73ca821763d69..5dc4ea4b4450e 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1685,7 +1685,7 @@ static int spi_imx_probe(struct platform_device *pdev)
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
++ dev_err_probe(&pdev->dev, ret, "bitbang start failed\n");
+ goto out_bitbang_start;
+ }
+
+diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
+index f236e3034cf85..aafac128bb5f1 100644
+--- a/drivers/spi/spi-pxa2xx-pci.c
++++ b/drivers/spi/spi-pxa2xx-pci.c
+@@ -21,7 +21,8 @@ enum {
+ PORT_BSW1,
+ PORT_BSW2,
+ PORT_CE4100,
+- PORT_LPT,
++ PORT_LPT0,
++ PORT_LPT1,
+ };
+
+ struct pxa_spi_info {
+@@ -57,8 +58,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
+ static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
+ static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
+
+-static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
+-static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
++static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
++static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
++static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
++static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
+
+ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
+ {
+@@ -185,12 +188,19 @@ static struct pxa_spi_info spi_info_configs[] = {
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ },
+- [PORT_LPT] = {
++ [PORT_LPT0] = {
+ .type = LPSS_LPT_SSP,
+ .port_id = 0,
+ .setup = lpss_spi_setup,
+- .tx_param = &lpt_tx_param,
+- .rx_param = &lpt_rx_param,
++ .tx_param = &lpt0_tx_param,
++ .rx_param = &lpt0_rx_param,
++ },
++ [PORT_LPT1] = {
++ .type = LPSS_LPT_SSP,
++ .port_id = 1,
++ .setup = lpss_spi_setup,
++ .tx_param = &lpt1_tx_param,
++ .rx_param = &lpt1_rx_param,
+ },
+ };
+
+@@ -285,8 +295,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
+ { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
+ { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+ { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
+- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
+- { },
++ { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
++ { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
++ { }
+ };
+ MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
+
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 6017209c6d2f7..6eeb39669a866 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1677,6 +1677,10 @@ static int stm32_spi_transfer_one(struct spi_master *master,
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
++ /* Don't do anything on 0 bytes transfers */
++ if (transfer->len == 0)
++ return 0;
++
+ spi->tx_buf = transfer->tx_buf;
+ spi->rx_buf = transfer->rx_buf;
+ spi->tx_len = spi->tx_buf ? transfer->len : 0;
+diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
+index 8cdca6ab80989..ea706d9629cb1 100644
+--- a/drivers/spi/spi-synquacer.c
++++ b/drivers/spi/spi-synquacer.c
+@@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
+ val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
+ SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
+ val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
++
++ if (!enable)
++ val |= SYNQUACER_HSSPI_DMSTOP_STOP;
++
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ }
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 720ab34784c1d..ccca3a7409fac 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1267,7 +1267,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
+ ptp_read_system_prets(xfer->ptp_sts);
+ }
+
+- if (xfer->tx_buf || xfer->rx_buf) {
++ if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ reinit_completion(&ctlr->xfer_completion);
+
+ fallback_pio:
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index de844b4121107..bbbd311eda030 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
+ */
+ #include <linux/bitmap.h>
+ #include <linux/delay.h>
+@@ -505,8 +505,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
+ static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
+ {
+ unsigned int irq;
+- u32 status;
+- int id;
++ u32 status, id;
+ u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
+
+diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
+index dc4da66c3695b..54bdb64f52e88 100644
+--- a/drivers/staging/gdm724x/gdm_usb.c
++++ b/drivers/staging/gdm724x/gdm_usb.c
+@@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev,
+
+ static int request_mac_address(struct lte_udev *udev)
+ {
+- u8 buf[16] = {0,};
+- struct hci_packet *hci = (struct hci_packet *)buf;
++ struct hci_packet *hci;
+ struct usb_device *usbdev = udev->usbdev;
+ int actual;
+ int ret = -1;
+
++ hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
++ if (!hci)
++ return -ENOMEM;
++
+ hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
+ hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
+ hci->data[0] = MAC_ADDRESS;
+
+- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
++ ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
+ &actual, 1000);
+
+ udev->request_mac_addr = 1;
++ kfree(hci);
+
+ return ret;
+ }
+diff --git a/drivers/staging/media/allegro-dvt/allegro-core.c b/drivers/staging/media/allegro-dvt/allegro-core.c
+index 9f718f43282bc..640451134072b 100644
+--- a/drivers/staging/media/allegro-dvt/allegro-core.c
++++ b/drivers/staging/media/allegro-dvt/allegro-core.c
+@@ -2483,8 +2483,6 @@ static int allegro_open(struct file *file)
+ INIT_LIST_HEAD(&channel->buffers_reference);
+ INIT_LIST_HEAD(&channel->buffers_intermediate);
+
+- list_add(&channel->list, &dev->channels);
+-
+ channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
+ allegro_queue_init);
+
+@@ -2493,6 +2491,7 @@ static int allegro_open(struct file *file)
+ goto error;
+ }
+
++ list_add(&channel->list, &dev->channels);
+ file->private_data = &channel->fh;
+ v4l2_fh_add(&channel->fh);
+
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+index b666cb23e5ca1..2ef5f44e4b6b6 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+@@ -349,12 +349,20 @@ static int isp_subdev_get_selection(struct v4l2_subdev *sd,
+ return 0;
+ }
+
+-static char *atomisp_pad_str[] = { "ATOMISP_SUBDEV_PAD_SINK",
+- "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
+- "ATOMISP_SUBDEV_PAD_SOURCE_VF",
+- "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
+- "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO"
+- };
++static const char *atomisp_pad_str(unsigned int pad)
++{
++ static const char *const pad_str[] = {
++ "ATOMISP_SUBDEV_PAD_SINK",
++ "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
++ "ATOMISP_SUBDEV_PAD_SOURCE_VF",
++ "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
++ "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO",
++ };
++
++ if (pad >= ARRAY_SIZE(pad_str))
++ return "ATOMISP_INVALID_PAD";
++ return pad_str[pad];
++}
+
+ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+@@ -378,7 +386,7 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+
+ dev_dbg(isp->dev,
+ "sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
+- atomisp_pad_str[pad], target == V4L2_SEL_TGT_CROP
++ atomisp_pad_str(pad), target == V4L2_SEL_TGT_CROP
+ ? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
+ r->left, r->top, r->width, r->height,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+@@ -612,7 +620,7 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ enum atomisp_input_stream_id stream_id;
+
+ dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
+- atomisp_pad_str[pad], ffmt->width, ffmt->height, ffmt->code,
++ atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE");
+
+diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
+index e0eaff0f8a228..6a5ee46070898 100644
+--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
++++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
+@@ -269,7 +269,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ hmm_set(bo->start, 0, bytes);
+
+ dev_dbg(atomisp_dev,
+- "%s: pages: 0x%08x (%ld bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
++ "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
+ __func__, bo->start, bytes, type, from_highmem, userptr, cached);
+
+ return bo->start;
+diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
+index fab1155a5958c..63a0204502a8b 100644
+--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
+@@ -869,11 +869,7 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+- mutex_lock(&priv->mutex);
+-
+ video_unregister_device(vfd);
+-
+- mutex_unlock(&priv->mutex);
+ }
+
+ struct imx_media_video_dev *
+diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
+index 6d2205461e565..338b8bd0bb076 100644
+--- a/drivers/staging/media/imx/imx-media-dev.c
++++ b/drivers/staging/media/imx/imx-media-dev.c
+@@ -53,6 +53,7 @@ static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
+ imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+ if (IS_ERR(imxmd->m2m_vdev)) {
+ ret = PTR_ERR(imxmd->m2m_vdev);
++ imxmd->m2m_vdev = NULL;
+ goto unlock;
+ }
+
+@@ -107,10 +108,14 @@ static int imx_media_remove(struct platform_device *pdev)
+
+ v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+
++ if (imxmd->m2m_vdev) {
++ imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
++ imxmd->m2m_vdev = NULL;
++ }
++
+ v4l2_async_notifier_unregister(&imxmd->notifier);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ v4l2_async_notifier_cleanup(&imxmd->notifier);
+- imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+ media_device_unregister(&imxmd->md);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
+index a3f3df9017046..ac52b1daf9914 100644
+--- a/drivers/staging/media/imx/imx7-media-csi.c
++++ b/drivers/staging/media/imx/imx7-media-csi.c
+@@ -499,6 +499,7 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
+ struct v4l2_subdev_format *sink_fmt)
+ {
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
++ struct media_entity *src;
+ struct media_pad *pad;
+ int ret;
+
+@@ -509,11 +510,21 @@ static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
+ if (!csi->src_sd)
+ return -EPIPE;
+
++ src = &csi->src_sd->entity;
++
++ /*
++ * if the source is neither a CSI MUX or CSI-2 get the one directly
++ * upstream from this CSI
++ */
++ if (src->function != MEDIA_ENT_F_VID_IF_BRIDGE &&
++ src->function != MEDIA_ENT_F_VID_MUX)
++ src = &csi->sd.entity;
++
+ /*
+- * find the entity that is selected by the CSI mux. This is needed
++ * find the entity that is selected by the source. This is needed
+ * to distinguish between a parallel or CSI-2 pipeline.
+ */
+- pad = imx_media_pipeline_pad(&csi->src_sd->entity, 0, 0, true);
++ pad = imx_media_pipeline_pad(src, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
+
+@@ -1164,12 +1175,12 @@ static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
+ struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
+
+- /* The bound subdev must always be the CSI mux */
+- if (WARN_ON(sd->entity.function != MEDIA_ENT_F_VID_MUX))
+- return -ENXIO;
+-
+- /* Mark it as such via its group id */
+- sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
++ /*
++ * If the subdev is a video mux, it must be one of the CSI
++ * muxes. Mark it as such via its group id.
++ */
++ if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
++ sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink);
+ }
+diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
+index 66da1bf10c32e..23256d1286f3e 100644
+--- a/drivers/staging/mt7621-dma/Makefile
++++ b/drivers/staging/mt7621-dma/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
++obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
+
+ ccflags-y += -I$(srctree)/drivers/dma
+diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
+new file mode 100644
+index 0000000000000..b0ed935de7acc
+--- /dev/null
++++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
+@@ -0,0 +1,760 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
++ * MTK HSDMA support
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/of_dma.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++#include "virt-dma.h"
++
++#define HSDMA_BASE_OFFSET 0x800
++
++#define HSDMA_REG_TX_BASE 0x00
++#define HSDMA_REG_TX_CNT 0x04
++#define HSDMA_REG_TX_CTX 0x08
++#define HSDMA_REG_TX_DTX 0x0c
++#define HSDMA_REG_RX_BASE 0x100
++#define HSDMA_REG_RX_CNT 0x104
++#define HSDMA_REG_RX_CRX 0x108
++#define HSDMA_REG_RX_DRX 0x10c
++#define HSDMA_REG_INFO 0x200
++#define HSDMA_REG_GLO_CFG 0x204
++#define HSDMA_REG_RST_CFG 0x208
++#define HSDMA_REG_DELAY_INT 0x20c
++#define HSDMA_REG_FREEQ_THRES 0x210
++#define HSDMA_REG_INT_STATUS 0x220
++#define HSDMA_REG_INT_MASK 0x228
++#define HSDMA_REG_SCH_Q01 0x280
++#define HSDMA_REG_SCH_Q23 0x284
++
++#define HSDMA_DESCS_MAX 0xfff
++#define HSDMA_DESCS_NUM 8
++#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
++#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
++
++/* HSDMA_REG_INFO */
++#define HSDMA_INFO_INDEX_MASK 0xf
++#define HSDMA_INFO_INDEX_SHIFT 24
++#define HSDMA_INFO_BASE_MASK 0xff
++#define HSDMA_INFO_BASE_SHIFT 16
++#define HSDMA_INFO_RX_MASK 0xff
++#define HSDMA_INFO_RX_SHIFT 8
++#define HSDMA_INFO_TX_MASK 0xff
++#define HSDMA_INFO_TX_SHIFT 0
++
++/* HSDMA_REG_GLO_CFG */
++#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
++#define HSDMA_GLO_CLK_GATE BIT(30)
++#define HSDMA_GLO_BYTE_SWAP BIT(29)
++#define HSDMA_GLO_MULTI_DMA BIT(10)
++#define HSDMA_GLO_TWO_BUF BIT(9)
++#define HSDMA_GLO_32B_DESC BIT(8)
++#define HSDMA_GLO_BIG_ENDIAN BIT(7)
++#define HSDMA_GLO_TX_DONE BIT(6)
++#define HSDMA_GLO_BT_MASK 0x3
++#define HSDMA_GLO_BT_SHIFT 4
++#define HSDMA_GLO_RX_BUSY BIT(3)
++#define HSDMA_GLO_RX_DMA BIT(2)
++#define HSDMA_GLO_TX_BUSY BIT(1)
++#define HSDMA_GLO_TX_DMA BIT(0)
++
++#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
++
++#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
++ HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
++
++/* HSDMA_REG_RST_CFG */
++#define HSDMA_RST_RX_SHIFT 16
++#define HSDMA_RST_TX_SHIFT 0
++
++/* HSDMA_REG_DELAY_INT */
++#define HSDMA_DELAY_INT_EN BIT(15)
++#define HSDMA_DELAY_PEND_OFFSET 8
++#define HSDMA_DELAY_TIME_OFFSET 0
++#define HSDMA_DELAY_TX_OFFSET 16
++#define HSDMA_DELAY_RX_OFFSET 0
++
++#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
++ ((x) << HSDMA_DELAY_PEND_OFFSET))
++#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
++ HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
++
++/* HSDMA_REG_INT_STATUS */
++#define HSDMA_INT_DELAY_RX_COH BIT(31)
++#define HSDMA_INT_DELAY_RX_INT BIT(30)
++#define HSDMA_INT_DELAY_TX_COH BIT(29)
++#define HSDMA_INT_DELAY_TX_INT BIT(28)
++#define HSDMA_INT_RX_MASK 0x3
++#define HSDMA_INT_RX_SHIFT 16
++#define HSDMA_INT_RX_Q0 BIT(16)
++#define HSDMA_INT_TX_MASK 0xf
++#define HSDMA_INT_TX_SHIFT 0
++#define HSDMA_INT_TX_Q0 BIT(0)
++
++/* tx/rx dma desc flags */
++#define HSDMA_PLEN_MASK 0x3fff
++#define HSDMA_DESC_DONE BIT(31)
++#define HSDMA_DESC_LS0 BIT(30)
++#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
++#define HSDMA_DESC_TAG BIT(15)
++#define HSDMA_DESC_LS1 BIT(14)
++#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
++
++/* align 4 bytes */
++#define HSDMA_ALIGN_SIZE 3
++/* align size 128bytes */
++#define HSDMA_MAX_PLEN 0x3f80
++
++struct hsdma_desc {
++ u32 addr0;
++ u32 flags;
++ u32 addr1;
++ u32 unused;
++};
++
++struct mtk_hsdma_sg {
++ dma_addr_t src_addr;
++ dma_addr_t dst_addr;
++ u32 len;
++};
++
++struct mtk_hsdma_desc {
++ struct virt_dma_desc vdesc;
++ unsigned int num_sgs;
++ struct mtk_hsdma_sg sg[1];
++};
++
++struct mtk_hsdma_chan {
++ struct virt_dma_chan vchan;
++ unsigned int id;
++ dma_addr_t desc_addr;
++ int tx_idx;
++ int rx_idx;
++ struct hsdma_desc *tx_ring;
++ struct hsdma_desc *rx_ring;
++ struct mtk_hsdma_desc *desc;
++ unsigned int next_sg;
++};
++
++struct mtk_hsdam_engine {
++ struct dma_device ddev;
++ struct device_dma_parameters dma_parms;
++ void __iomem *base;
++ struct tasklet_struct task;
++ volatile unsigned long chan_issued;
++
++ struct mtk_hsdma_chan chan[1];
++};
++
++static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
++ struct mtk_hsdma_chan *chan)
++{
++ return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
++ ddev);
++}
++
++static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
++{
++ return container_of(c, struct mtk_hsdma_chan, vchan.chan);
++}
++
++static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
++ struct virt_dma_desc *vdesc)
++{
++ return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
++}
++
++static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
++{
++ return readl(hsdma->base + reg);
++}
++
++static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
++ unsigned int reg, u32 val)
++{
++ writel(val, hsdma->base + reg);
++}
++
++static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ chan->tx_idx = 0;
++ chan->rx_idx = HSDMA_DESCS_NUM - 1;
++
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++ 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++ 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
++}
++
++static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
++{
++ dev_dbg(hsdma->ddev.dev,
++ "tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
++
++ dev_dbg(hsdma->ddev.dev,
++ "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
++ mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
++ mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
++ mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
++}
++
++static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ struct hsdma_desc *tx_desc;
++ struct hsdma_desc *rx_desc;
++ int i;
++
++ dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
++ chan->tx_idx, chan->rx_idx);
++
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ tx_desc = &chan->tx_ring[i];
++ rx_desc = &chan->rx_ring[i];
++
++ dev_dbg(hsdma->ddev.dev,
++ "%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
++ i, tx_desc->addr0, tx_desc->flags,
++ tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
++ }
++}
++
++static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ int i;
++
++ /* disable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++ /* disable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++ /* init desc value */
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ chan->tx_ring[i].addr0 = 0;
++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++ }
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ chan->rx_ring[i].addr0 = 0;
++ chan->rx_ring[i].flags = 0;
++ }
++
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++
++ /* enable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++ /* enable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++}
++
++static int mtk_hsdma_terminate_all(struct dma_chan *c)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++ unsigned long timeout;
++ LIST_HEAD(head);
++
++ spin_lock_bh(&chan->vchan.lock);
++ chan->desc = NULL;
++ clear_bit(chan->id, &hsdma->chan_issued);
++ vchan_get_all_descriptors(&chan->vchan, &head);
++ spin_unlock_bh(&chan->vchan.lock);
++
++ vchan_dma_desc_free_list(&chan->vchan, &head);
++
++ /* wait dma transfer complete */
++ timeout = jiffies + msecs_to_jiffies(2000);
++ while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
++ (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
++ if (time_after_eq(jiffies, timeout)) {
++ hsdma_dump_desc(hsdma, chan);
++ mtk_hsdma_reset(hsdma, chan);
++ dev_err(hsdma->ddev.dev, "timeout, reset it\n");
++ break;
++ }
++ cpu_relax();
++ }
++
++ return 0;
++}
++
++static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ dma_addr_t src, dst;
++ size_t len, tlen;
++ struct hsdma_desc *tx_desc, *rx_desc;
++ struct mtk_hsdma_sg *sg;
++ unsigned int i;
++ int rx_idx;
++
++ sg = &chan->desc->sg[0];
++ len = sg->len;
++ chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
++
++ /* tx desc */
++ src = sg->src_addr;
++ for (i = 0; i < chan->desc->num_sgs; i++) {
++ tx_desc = &chan->tx_ring[chan->tx_idx];
++
++ if (len > HSDMA_MAX_PLEN)
++ tlen = HSDMA_MAX_PLEN;
++ else
++ tlen = len;
++
++ if (i & 0x1) {
++ tx_desc->addr1 = src;
++ tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
++ } else {
++ tx_desc->addr0 = src;
++ tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++ /* update index */
++ chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
++ }
++
++ src += tlen;
++ len -= tlen;
++ }
++ if (i & 0x1)
++ tx_desc->flags |= HSDMA_DESC_LS0;
++ else
++ tx_desc->flags |= HSDMA_DESC_LS1;
++
++ /* rx desc */
++ rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++ len = sg->len;
++ dst = sg->dst_addr;
++ for (i = 0; i < chan->desc->num_sgs; i++) {
++ rx_desc = &chan->rx_ring[rx_idx];
++ if (len > HSDMA_MAX_PLEN)
++ tlen = HSDMA_MAX_PLEN;
++ else
++ tlen = len;
++
++ rx_desc->addr0 = dst;
++ rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++ dst += tlen;
++ len -= tlen;
++
++ /* update index */
++ rx_idx = HSDMA_NEXT_DESC(rx_idx);
++ }
++
++ /* make sure desc and index all up to date */
++ wmb();
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++
++ return 0;
++}
++
++static int gdma_next_desc(struct mtk_hsdma_chan *chan)
++{
++ struct virt_dma_desc *vdesc;
++
++ vdesc = vchan_next_desc(&chan->vchan);
++ if (!vdesc) {
++ chan->desc = NULL;
++ return 0;
++ }
++ chan->desc = to_mtk_hsdma_desc(vdesc);
++ chan->next_sg = 0;
++
++ return 1;
++}
++
++static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ struct mtk_hsdma_desc *desc;
++ int chan_issued;
++
++ chan_issued = 0;
++ spin_lock_bh(&chan->vchan.lock);
++ desc = chan->desc;
++ if (likely(desc)) {
++ if (chan->next_sg == desc->num_sgs) {
++ list_del(&desc->vdesc.node);
++ vchan_cookie_complete(&desc->vdesc);
++ chan_issued = gdma_next_desc(chan);
++ }
++ } else {
++ dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
++ }
++
++ if (chan_issued)
++ set_bit(chan->id, &hsdma->chan_issued);
++ spin_unlock_bh(&chan->vchan.lock);
++}
++
++static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
++{
++ struct mtk_hsdam_engine *hsdma = devid;
++ u32 status;
++
++ status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
++ if (unlikely(!status))
++ return IRQ_NONE;
++
++ if (likely(status & HSDMA_INT_RX_Q0))
++ tasklet_schedule(&hsdma->task);
++ else
++ dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
++ /* clean intr bits */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
++
++ return IRQ_HANDLED;
++}
++
++static void mtk_hsdma_issue_pending(struct dma_chan *c)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++
++ spin_lock_bh(&chan->vchan.lock);
++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
++ if (gdma_next_desc(chan)) {
++ set_bit(chan->id, &hsdma->chan_issued);
++ tasklet_schedule(&hsdma->task);
++ } else {
++ dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
++ }
++ }
++ spin_unlock_bh(&chan->vchan.lock);
++}
++
++static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
++ struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
++ size_t len, unsigned long flags)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdma_desc *desc;
++
++ if (len <= 0)
++ return NULL;
++
++ desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
++ if (!desc) {
++ dev_err(c->device->dev, "alloc memcpy decs error\n");
++ return NULL;
++ }
++
++ desc->sg[0].src_addr = src;
++ desc->sg[0].dst_addr = dest;
++ desc->sg[0].len = len;
++
++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++}
++
++static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
++ dma_cookie_t cookie,
++ struct dma_tx_state *state)
++{
++ return dma_cookie_status(c, cookie, state);
++}
++
++static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
++{
++ vchan_free_chan_resources(to_virt_chan(c));
++}
++
++static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
++{
++ kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
++}
++
++static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++
++ if (test_and_clear_bit(0, &hsdma->chan_issued)) {
++ chan = &hsdma->chan[0];
++ if (chan->desc)
++ mtk_hsdma_start_transfer(hsdma, chan);
++ else
++ dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
++ }
++}
++
++static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++ int next_idx, drx_idx, cnt;
++
++ chan = &hsdma->chan[0];
++ next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++ drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
++
++ cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
++ if (!cnt)
++ return;
++
++ chan->next_sg += cnt;
++ chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
++
++ /* update rx crx */
++ wmb();
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++ mtk_hsdma_chan_done(hsdma, chan);
++}
++
++static void mtk_hsdma_tasklet(struct tasklet_struct *t)
++{
++ struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
++
++ mtk_hsdma_rx(hsdma);
++ mtk_hsdma_tx(hsdma);
++}
++
++static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ int i;
++
++ chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
++ 2 * HSDMA_DESCS_NUM *
++ sizeof(*chan->tx_ring),
++ &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
++ if (!chan->tx_ring)
++ goto no_mem;
++
++ chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
++
++ /* init tx ring value */
++ for (i = 0; i < HSDMA_DESCS_NUM; i++)
++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++
++ return 0;
++no_mem:
++ return -ENOMEM;
++}
++
++static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ if (chan->tx_ring) {
++ dma_free_coherent(hsdma->ddev.dev,
++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
++ chan->tx_ring, chan->desc_addr);
++ chan->tx_ring = NULL;
++ chan->rx_ring = NULL;
++ }
++}
++
++static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++ int ret;
++ u32 reg;
++
++ /* init desc */
++ chan = &hsdma->chan[0];
++ ret = mtk_hsdam_alloc_desc(hsdma, chan);
++ if (ret)
++ return ret;
++
++ /* tx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
++ /* rx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
++ (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++
++ /* enable rx intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++ /* enable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++
++ /* hardware info */
++ reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
++ dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
++ (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
++ (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
++
++ hsdma_dump_reg(hsdma);
++
++ return ret;
++}
++
++static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++
++ /* disable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++ /* disable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++ /* free desc */
++ chan = &hsdma->chan[0];
++ mtk_hsdam_free_desc(hsdma, chan);
++
++ /* tx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
++ /* rx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++}
++
++static const struct of_device_id mtk_hsdma_of_match[] = {
++ { .compatible = "mediatek,mt7621-hsdma" },
++ { },
++};
++
++static int mtk_hsdma_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *match;
++ struct mtk_hsdma_chan *chan;
++ struct mtk_hsdam_engine *hsdma;
++ struct dma_device *dd;
++ int ret;
++ int irq;
++ void __iomem *base;
++
++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
++ if (!match)
++ return -EINVAL;
++
++ hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
++ if (!hsdma)
++ return -EINVAL;
++
++ base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++ hsdma->base = base + HSDMA_BASE_OFFSET;
++ tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return -EINVAL;
++ ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
++ 0, dev_name(&pdev->dev), hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ device_reset(&pdev->dev);
++
++ dd = &hsdma->ddev;
++ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
++ dd->copy_align = HSDMA_ALIGN_SIZE;
++ dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
++ dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
++ dd->device_terminate_all = mtk_hsdma_terminate_all;
++ dd->device_tx_status = mtk_hsdma_tx_status;
++ dd->device_issue_pending = mtk_hsdma_issue_pending;
++ dd->dev = &pdev->dev;
++ dd->dev->dma_parms = &hsdma->dma_parms;
++ dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
++ INIT_LIST_HEAD(&dd->channels);
++
++ chan = &hsdma->chan[0];
++ chan->id = 0;
++ chan->vchan.desc_free = mtk_hsdma_desc_free;
++ vchan_init(&chan->vchan, dd);
++
++ /* init hardware */
++ ret = mtk_hsdma_init(hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to alloc ring descs\n");
++ return ret;
++ }
++
++ ret = dma_async_device_register(dd);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register dma device\n");
++ goto err_uninit_hsdma;
++ }
++
++ ret = of_dma_controller_register(pdev->dev.of_node,
++ of_dma_xlate_by_chan_id, hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register of dma controller\n");
++ goto err_unregister;
++ }
++
++ platform_set_drvdata(pdev, hsdma);
++
++ return 0;
++
++err_unregister:
++ dma_async_device_unregister(dd);
++err_uninit_hsdma:
++ mtk_hsdma_uninit(hsdma);
++ return ret;
++}
++
++static int mtk_hsdma_remove(struct platform_device *pdev)
++{
++ struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
++
++ mtk_hsdma_uninit(hsdma);
++
++ of_dma_controller_free(pdev->dev.of_node);
++ dma_async_device_unregister(&hsdma->ddev);
++
++ return 0;
++}
++
++static struct platform_driver mtk_hsdma_driver = {
++ .probe = mtk_hsdma_probe,
++ .remove = mtk_hsdma_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = mtk_hsdma_of_match,
++ },
++};
++module_platform_driver(mtk_hsdma_driver);
++
++MODULE_AUTHOR("Michael Lee ");
++MODULE_DESCRIPTION("MTK HSDMA driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
+deleted file mode 100644
+index bc4bb43743131..0000000000000
+--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
++++ /dev/null
+@@ -1,760 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
+- * MTK HSDMA support
+- */
+-
+-#include <linux/dmaengine.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/err.h>
+-#include <linux/init.h>
+-#include <linux/list.h>
+-#include <linux/module.h>
+-#include <linux/platform_device.h>
+-#include <linux/slab.h>
+-#include <linux/spinlock.h>
+-#include <linux/irq.h>
+-#include <linux/of_dma.h>
+-#include <linux/reset.h>
+-#include <linux/of_device.h>
+-
+-#include "virt-dma.h"
+-
+-#define HSDMA_BASE_OFFSET 0x800
+-
+-#define HSDMA_REG_TX_BASE 0x00
+-#define HSDMA_REG_TX_CNT 0x04
+-#define HSDMA_REG_TX_CTX 0x08
+-#define HSDMA_REG_TX_DTX 0x0c
+-#define HSDMA_REG_RX_BASE 0x100
+-#define HSDMA_REG_RX_CNT 0x104
+-#define HSDMA_REG_RX_CRX 0x108
+-#define HSDMA_REG_RX_DRX 0x10c
+-#define HSDMA_REG_INFO 0x200
+-#define HSDMA_REG_GLO_CFG 0x204
+-#define HSDMA_REG_RST_CFG 0x208
+-#define HSDMA_REG_DELAY_INT 0x20c
+-#define HSDMA_REG_FREEQ_THRES 0x210
+-#define HSDMA_REG_INT_STATUS 0x220
+-#define HSDMA_REG_INT_MASK 0x228
+-#define HSDMA_REG_SCH_Q01 0x280
+-#define HSDMA_REG_SCH_Q23 0x284
+-
+-#define HSDMA_DESCS_MAX 0xfff
+-#define HSDMA_DESCS_NUM 8
+-#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
+-#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
+-
+-/* HSDMA_REG_INFO */
+-#define HSDMA_INFO_INDEX_MASK 0xf
+-#define HSDMA_INFO_INDEX_SHIFT 24
+-#define HSDMA_INFO_BASE_MASK 0xff
+-#define HSDMA_INFO_BASE_SHIFT 16
+-#define HSDMA_INFO_RX_MASK 0xff
+-#define HSDMA_INFO_RX_SHIFT 8
+-#define HSDMA_INFO_TX_MASK 0xff
+-#define HSDMA_INFO_TX_SHIFT 0
+-
+-/* HSDMA_REG_GLO_CFG */
+-#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
+-#define HSDMA_GLO_CLK_GATE BIT(30)
+-#define HSDMA_GLO_BYTE_SWAP BIT(29)
+-#define HSDMA_GLO_MULTI_DMA BIT(10)
+-#define HSDMA_GLO_TWO_BUF BIT(9)
+-#define HSDMA_GLO_32B_DESC BIT(8)
+-#define HSDMA_GLO_BIG_ENDIAN BIT(7)
+-#define HSDMA_GLO_TX_DONE BIT(6)
+-#define HSDMA_GLO_BT_MASK 0x3
+-#define HSDMA_GLO_BT_SHIFT 4
+-#define HSDMA_GLO_RX_BUSY BIT(3)
+-#define HSDMA_GLO_RX_DMA BIT(2)
+-#define HSDMA_GLO_TX_BUSY BIT(1)
+-#define HSDMA_GLO_TX_DMA BIT(0)
+-
+-#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
+-
+-#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
+- HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
+-
+-/* HSDMA_REG_RST_CFG */
+-#define HSDMA_RST_RX_SHIFT 16
+-#define HSDMA_RST_TX_SHIFT 0
+-
+-/* HSDMA_REG_DELAY_INT */
+-#define HSDMA_DELAY_INT_EN BIT(15)
+-#define HSDMA_DELAY_PEND_OFFSET 8
+-#define HSDMA_DELAY_TIME_OFFSET 0
+-#define HSDMA_DELAY_TX_OFFSET 16
+-#define HSDMA_DELAY_RX_OFFSET 0
+-
+-#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
+- ((x) << HSDMA_DELAY_PEND_OFFSET))
+-#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
+- HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
+-
+-/* HSDMA_REG_INT_STATUS */
+-#define HSDMA_INT_DELAY_RX_COH BIT(31)
+-#define HSDMA_INT_DELAY_RX_INT BIT(30)
+-#define HSDMA_INT_DELAY_TX_COH BIT(29)
+-#define HSDMA_INT_DELAY_TX_INT BIT(28)
+-#define HSDMA_INT_RX_MASK 0x3
+-#define HSDMA_INT_RX_SHIFT 16
+-#define HSDMA_INT_RX_Q0 BIT(16)
+-#define HSDMA_INT_TX_MASK 0xf
+-#define HSDMA_INT_TX_SHIFT 0
+-#define HSDMA_INT_TX_Q0 BIT(0)
+-
+-/* tx/rx dma desc flags */
+-#define HSDMA_PLEN_MASK 0x3fff
+-#define HSDMA_DESC_DONE BIT(31)
+-#define HSDMA_DESC_LS0 BIT(30)
+-#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
+-#define HSDMA_DESC_TAG BIT(15)
+-#define HSDMA_DESC_LS1 BIT(14)
+-#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
+-
+-/* align 4 bytes */
+-#define HSDMA_ALIGN_SIZE 3
+-/* align size 128bytes */
+-#define HSDMA_MAX_PLEN 0x3f80
+-
+-struct hsdma_desc {
+- u32 addr0;
+- u32 flags;
+- u32 addr1;
+- u32 unused;
+-};
+-
+-struct mtk_hsdma_sg {
+- dma_addr_t src_addr;
+- dma_addr_t dst_addr;
+- u32 len;
+-};
+-
+-struct mtk_hsdma_desc {
+- struct virt_dma_desc vdesc;
+- unsigned int num_sgs;
+- struct mtk_hsdma_sg sg[1];
+-};
+-
+-struct mtk_hsdma_chan {
+- struct virt_dma_chan vchan;
+- unsigned int id;
+- dma_addr_t desc_addr;
+- int tx_idx;
+- int rx_idx;
+- struct hsdma_desc *tx_ring;
+- struct hsdma_desc *rx_ring;
+- struct mtk_hsdma_desc *desc;
+- unsigned int next_sg;
+-};
+-
+-struct mtk_hsdam_engine {
+- struct dma_device ddev;
+- struct device_dma_parameters dma_parms;
+- void __iomem *base;
+- struct tasklet_struct task;
+- volatile unsigned long chan_issued;
+-
+- struct mtk_hsdma_chan chan[1];
+-};
+-
+-static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
+- struct mtk_hsdma_chan *chan)
+-{
+- return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
+- ddev);
+-}
+-
+-static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
+-{
+- return container_of(c, struct mtk_hsdma_chan, vchan.chan);
+-}
+-
+-static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
+- struct virt_dma_desc *vdesc)
+-{
+- return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
+-}
+-
+-static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
+-{
+- return readl(hsdma->base + reg);
+-}
+-
+-static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
+- unsigned int reg, u32 val)
+-{
+- writel(val, hsdma->base + reg);
+-}
+-
+-static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- chan->tx_idx = 0;
+- chan->rx_idx = HSDMA_DESCS_NUM - 1;
+-
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+-
+- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+- 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
+- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+- 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
+-}
+-
+-static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
+-{
+- dev_dbg(hsdma->ddev.dev,
+- "tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
+-
+- dev_dbg(hsdma->ddev.dev,
+- "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
+- mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
+- mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
+- mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
+- mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
+- mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
+-}
+-
+-static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- struct hsdma_desc *tx_desc;
+- struct hsdma_desc *rx_desc;
+- int i;
+-
+- dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
+- chan->tx_idx, chan->rx_idx);
+-
+- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+- tx_desc = &chan->tx_ring[i];
+- rx_desc = &chan->rx_ring[i];
+-
+- dev_dbg(hsdma->ddev.dev,
+- "%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
+- i, tx_desc->addr0, tx_desc->flags,
+- tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
+- }
+-}
+-
+-static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- int i;
+-
+- /* disable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+-
+- /* disable intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+-
+- /* init desc value */
+- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+- chan->tx_ring[i].addr0 = 0;
+- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+- }
+- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+- chan->rx_ring[i].addr0 = 0;
+- chan->rx_ring[i].flags = 0;
+- }
+-
+- /* reset */
+- mtk_hsdma_reset_chan(hsdma, chan);
+-
+- /* enable intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+-
+- /* enable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+-}
+-
+-static int mtk_hsdma_terminate_all(struct dma_chan *c)
+-{
+- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+- unsigned long timeout;
+- LIST_HEAD(head);
+-
+- spin_lock_bh(&chan->vchan.lock);
+- chan->desc = NULL;
+- clear_bit(chan->id, &hsdma->chan_issued);
+- vchan_get_all_descriptors(&chan->vchan, &head);
+- spin_unlock_bh(&chan->vchan.lock);
+-
+- vchan_dma_desc_free_list(&chan->vchan, &head);
+-
+- /* wait dma transfer complete */
+- timeout = jiffies + msecs_to_jiffies(2000);
+- while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
+- (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
+- if (time_after_eq(jiffies, timeout)) {
+- hsdma_dump_desc(hsdma, chan);
+- mtk_hsdma_reset(hsdma, chan);
+- dev_err(hsdma->ddev.dev, "timeout, reset it\n");
+- break;
+- }
+- cpu_relax();
+- }
+-
+- return 0;
+-}
+-
+-static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- dma_addr_t src, dst;
+- size_t len, tlen;
+- struct hsdma_desc *tx_desc, *rx_desc;
+- struct mtk_hsdma_sg *sg;
+- unsigned int i;
+- int rx_idx;
+-
+- sg = &chan->desc->sg[0];
+- len = sg->len;
+- chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
+-
+- /* tx desc */
+- src = sg->src_addr;
+- for (i = 0; i < chan->desc->num_sgs; i++) {
+- tx_desc = &chan->tx_ring[chan->tx_idx];
+-
+- if (len > HSDMA_MAX_PLEN)
+- tlen = HSDMA_MAX_PLEN;
+- else
+- tlen = len;
+-
+- if (i & 0x1) {
+- tx_desc->addr1 = src;
+- tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
+- } else {
+- tx_desc->addr0 = src;
+- tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+-
+- /* update index */
+- chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
+- }
+-
+- src += tlen;
+- len -= tlen;
+- }
+- if (i & 0x1)
+- tx_desc->flags |= HSDMA_DESC_LS0;
+- else
+- tx_desc->flags |= HSDMA_DESC_LS1;
+-
+- /* rx desc */
+- rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+- len = sg->len;
+- dst = sg->dst_addr;
+- for (i = 0; i < chan->desc->num_sgs; i++) {
+- rx_desc = &chan->rx_ring[rx_idx];
+- if (len > HSDMA_MAX_PLEN)
+- tlen = HSDMA_MAX_PLEN;
+- else
+- tlen = len;
+-
+- rx_desc->addr0 = dst;
+- rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+-
+- dst += tlen;
+- len -= tlen;
+-
+- /* update index */
+- rx_idx = HSDMA_NEXT_DESC(rx_idx);
+- }
+-
+- /* make sure desc and index all up to date */
+- wmb();
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+-
+- return 0;
+-}
+-
+-static int gdma_next_desc(struct mtk_hsdma_chan *chan)
+-{
+- struct virt_dma_desc *vdesc;
+-
+- vdesc = vchan_next_desc(&chan->vchan);
+- if (!vdesc) {
+- chan->desc = NULL;
+- return 0;
+- }
+- chan->desc = to_mtk_hsdma_desc(vdesc);
+- chan->next_sg = 0;
+-
+- return 1;
+-}
+-
+-static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- struct mtk_hsdma_desc *desc;
+- int chan_issued;
+-
+- chan_issued = 0;
+- spin_lock_bh(&chan->vchan.lock);
+- desc = chan->desc;
+- if (likely(desc)) {
+- if (chan->next_sg == desc->num_sgs) {
+- list_del(&desc->vdesc.node);
+- vchan_cookie_complete(&desc->vdesc);
+- chan_issued = gdma_next_desc(chan);
+- }
+- } else {
+- dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
+- }
+-
+- if (chan_issued)
+- set_bit(chan->id, &hsdma->chan_issued);
+- spin_unlock_bh(&chan->vchan.lock);
+-}
+-
+-static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
+-{
+- struct mtk_hsdam_engine *hsdma = devid;
+- u32 status;
+-
+- status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
+- if (unlikely(!status))
+- return IRQ_NONE;
+-
+- if (likely(status & HSDMA_INT_RX_Q0))
+- tasklet_schedule(&hsdma->task);
+- else
+- dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
+- /* clean intr bits */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
+-
+- return IRQ_HANDLED;
+-}
+-
+-static void mtk_hsdma_issue_pending(struct dma_chan *c)
+-{
+- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+-
+- spin_lock_bh(&chan->vchan.lock);
+- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+- if (gdma_next_desc(chan)) {
+- set_bit(chan->id, &hsdma->chan_issued);
+- tasklet_schedule(&hsdma->task);
+- } else {
+- dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
+- }
+- }
+- spin_unlock_bh(&chan->vchan.lock);
+-}
+-
+-static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
+- struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+- size_t len, unsigned long flags)
+-{
+- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+- struct mtk_hsdma_desc *desc;
+-
+- if (len <= 0)
+- return NULL;
+-
+- desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
+- if (!desc) {
+- dev_err(c->device->dev, "alloc memcpy decs error\n");
+- return NULL;
+- }
+-
+- desc->sg[0].src_addr = src;
+- desc->sg[0].dst_addr = dest;
+- desc->sg[0].len = len;
+-
+- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+-}
+-
+-static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+- dma_cookie_t cookie,
+- struct dma_tx_state *state)
+-{
+- return dma_cookie_status(c, cookie, state);
+-}
+-
+-static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+-{
+- vchan_free_chan_resources(to_virt_chan(c));
+-}
+-
+-static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
+-{
+- kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
+-}
+-
+-static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+-
+- if (test_and_clear_bit(0, &hsdma->chan_issued)) {
+- chan = &hsdma->chan[0];
+- if (chan->desc)
+- mtk_hsdma_start_transfer(hsdma, chan);
+- else
+- dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
+- }
+-}
+-
+-static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+- int next_idx, drx_idx, cnt;
+-
+- chan = &hsdma->chan[0];
+- next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+- drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
+-
+- cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
+- if (!cnt)
+- return;
+-
+- chan->next_sg += cnt;
+- chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
+-
+- /* update rx crx */
+- wmb();
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+-
+- mtk_hsdma_chan_done(hsdma, chan);
+-}
+-
+-static void mtk_hsdma_tasklet(struct tasklet_struct *t)
+-{
+- struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
+-
+- mtk_hsdma_rx(hsdma);
+- mtk_hsdma_tx(hsdma);
+-}
+-
+-static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- int i;
+-
+- chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
+- 2 * HSDMA_DESCS_NUM *
+- sizeof(*chan->tx_ring),
+- &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
+- if (!chan->tx_ring)
+- goto no_mem;
+-
+- chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
+-
+- /* init tx ring value */
+- for (i = 0; i < HSDMA_DESCS_NUM; i++)
+- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+-
+- return 0;
+-no_mem:
+- return -ENOMEM;
+-}
+-
+-static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- if (chan->tx_ring) {
+- dma_free_coherent(hsdma->ddev.dev,
+- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+- chan->tx_ring, chan->desc_addr);
+- chan->tx_ring = NULL;
+- chan->rx_ring = NULL;
+- }
+-}
+-
+-static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+- int ret;
+- u32 reg;
+-
+- /* init desc */
+- chan = &hsdma->chan[0];
+- ret = mtk_hsdam_alloc_desc(hsdma, chan);
+- if (ret)
+- return ret;
+-
+- /* tx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
+- /* rx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
+- (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
+- /* reset */
+- mtk_hsdma_reset_chan(hsdma, chan);
+-
+- /* enable rx intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+-
+- /* enable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+-
+- /* hardware info */
+- reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
+- dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
+- (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
+- (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
+-
+- hsdma_dump_reg(hsdma);
+-
+- return ret;
+-}
+-
+-static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+-
+- /* disable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+-
+- /* disable intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+-
+- /* free desc */
+- chan = &hsdma->chan[0];
+- mtk_hsdam_free_desc(hsdma, chan);
+-
+- /* tx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
+- /* rx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
+- /* reset */
+- mtk_hsdma_reset_chan(hsdma, chan);
+-}
+-
+-static const struct of_device_id mtk_hsdma_of_match[] = {
+- { .compatible = "mediatek,mt7621-hsdma" },
+- { },
+-};
+-
+-static int mtk_hsdma_probe(struct platform_device *pdev)
+-{
+- const struct of_device_id *match;
+- struct mtk_hsdma_chan *chan;
+- struct mtk_hsdam_engine *hsdma;
+- struct dma_device *dd;
+- int ret;
+- int irq;
+- void __iomem *base;
+-
+- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+- if (ret)
+- return ret;
+-
+- match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
+- if (!match)
+- return -EINVAL;
+-
+- hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+- if (!hsdma)
+- return -EINVAL;
+-
+- base = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(base))
+- return PTR_ERR(base);
+- hsdma->base = base + HSDMA_BASE_OFFSET;
+- tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
+-
+- irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return -EINVAL;
+- ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
+- 0, dev_name(&pdev->dev), hsdma);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to request irq\n");
+- return ret;
+- }
+-
+- device_reset(&pdev->dev);
+-
+- dd = &hsdma->ddev;
+- dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+- dd->copy_align = HSDMA_ALIGN_SIZE;
+- dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+- dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+- dd->device_terminate_all = mtk_hsdma_terminate_all;
+- dd->device_tx_status = mtk_hsdma_tx_status;
+- dd->device_issue_pending = mtk_hsdma_issue_pending;
+- dd->dev = &pdev->dev;
+- dd->dev->dma_parms = &hsdma->dma_parms;
+- dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
+- INIT_LIST_HEAD(&dd->channels);
+-
+- chan = &hsdma->chan[0];
+- chan->id = 0;
+- chan->vchan.desc_free = mtk_hsdma_desc_free;
+- vchan_init(&chan->vchan, dd);
+-
+- /* init hardware */
+- ret = mtk_hsdma_init(hsdma);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to alloc ring descs\n");
+- return ret;
+- }
+-
+- ret = dma_async_device_register(dd);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to register dma device\n");
+- goto err_uninit_hsdma;
+- }
+-
+- ret = of_dma_controller_register(pdev->dev.of_node,
+- of_dma_xlate_by_chan_id, hsdma);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to register of dma controller\n");
+- goto err_unregister;
+- }
+-
+- platform_set_drvdata(pdev, hsdma);
+-
+- return 0;
+-
+-err_unregister:
+- dma_async_device_unregister(dd);
+-err_uninit_hsdma:
+- mtk_hsdma_uninit(hsdma);
+- return ret;
+-}
+-
+-static int mtk_hsdma_remove(struct platform_device *pdev)
+-{
+- struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
+-
+- mtk_hsdma_uninit(hsdma);
+-
+- of_dma_controller_free(pdev->dev.of_node);
+- dma_async_device_unregister(&hsdma->ddev);
+-
+- return 0;
+-}
+-
+-static struct platform_driver mtk_hsdma_driver = {
+- .probe = mtk_hsdma_probe,
+- .remove = mtk_hsdma_remove,
+- .driver = {
+- .name = "hsdma-mt7621",
+- .of_match_table = mtk_hsdma_of_match,
+- },
+-};
+-module_platform_driver(mtk_hsdma_driver);
+-
+-MODULE_AUTHOR("Michael Lee ");
+-MODULE_DESCRIPTION("MTK HSDMA driver");
+-MODULE_LICENSE("GPL v2");
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 43ebd11b53fe5..efad43d8e465d 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
+ {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
+ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++ {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ {} /* Terminating entry */
+ };
+diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+index 2833fc6901e6e..3f04b7a954ba0 100644
+--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
++++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+@@ -34,7 +34,7 @@
+ NL80211_RRF_PASSIVE_SCAN)
+
+ static const struct ieee80211_regdomain rtw_regdom_rd = {
+- .n_reg_rules = 3,
++ .n_reg_rules = 2,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTW_2GHZ_CH01_11,
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index f500a70438056..2ca5805b2fce0 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -958,7 +958,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
+ struct vchiq_service *service;
+ struct bulk_waiter_node *waiter = NULL;
+ bool found = false;
+- void *userdata = NULL;
++ void *userdata;
+ int status = 0;
+ int ret;
+
+@@ -997,6 +997,8 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
+ "found bulk_waiter %pK for pid %d", waiter,
+ current->pid);
+ userdata = &waiter->bulk_waiter;
++ } else {
++ userdata = args->userdata;
+ }
+
+ /*
+@@ -1715,7 +1717,7 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
+ {
+ struct vchiq_queue_bulk_transfer32 args32;
+ struct vchiq_queue_bulk_transfer args;
+- enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
++ enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
+ VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+
+ if (copy_from_user(&args32, argp, sizeof(args32)))
+diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
+index 36b36ef39d053..77fb104efdec1 100644
+--- a/drivers/staging/wfx/data_tx.c
++++ b/drivers/staging/wfx/data_tx.c
+@@ -331,6 +331,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ {
+ struct hif_msg *hif_msg;
+ struct hif_req_tx *req;
++ struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+@@ -344,11 +345,14 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+
+ // From now tx_info->control is unusable
+ memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
++ // Fill tx_priv
++ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
++ tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
+
+ // Fill hif_msg
+ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ WARN(offset & 1, "attempt to transmit an unaligned frame");
+- skb_put(skb, wfx_tx_get_icv_len(hw_key));
++ skb_put(skb, tx_priv->icv_size);
+ skb_push(skb, wmsg_len);
+ memset(skb->data, 0, wmsg_len);
+ hif_msg = (struct hif_msg *)skb->data;
+@@ -484,6 +488,7 @@ static void wfx_tx_fill_rates(struct wfx_dev *wdev,
+
+ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
+ {
++ const struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info;
+ struct wfx_vif *wvif;
+ struct sk_buff *skb;
+@@ -495,6 +500,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
++ tx_priv = wfx_skb_tx_priv(skb);
+ wvif = wdev_to_wvif(wdev, ((struct hif_msg *)skb->data)->interface);
+ WARN_ON(!wvif);
+ if (!wvif)
+@@ -503,6 +509,8 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
+ // Note that wfx_pending_get_pkt_us_delay() get data from tx_info
+ _trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
+ wfx_tx_fill_rates(wdev, tx_info, arg);
++ skb_trim(skb, skb->len - tx_priv->icv_size);
++
+ // From now, you can touch to tx_info->status, but do not touch to
+ // tx_priv anymore
+ // FIXME: use ieee80211_tx_info_clear_status()
+diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
+index 46c9fff7a870e..401363d6b563a 100644
+--- a/drivers/staging/wfx/data_tx.h
++++ b/drivers/staging/wfx/data_tx.h
+@@ -35,6 +35,7 @@ struct tx_policy_cache {
+
+ struct wfx_tx_priv {
+ ktime_t xmit_timestamp;
++ unsigned char icv_size;
+ };
+
+ void wfx_tx_policy_init(struct wfx_vif *wvif);
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+index 9b3eb2e8c92ad..b926e1d6c7b8e 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+@@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+ length += sizeof(struct cpl_tx_data_iso);
+
+-#define MAX_IMM_TX_PKT_LEN 256
+- return length <= MAX_IMM_TX_PKT_LEN;
++ return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+
+ /*
+diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
+index 1e3614e4798f0..6cbb3643c6c48 100644
+--- a/drivers/tee/optee/rpc.c
++++ b/drivers/tee/optee/rpc.c
+@@ -54,8 +54,9 @@ bad:
+ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+ {
+- struct i2c_client client = { 0 };
+ struct tee_param *params;
++ struct i2c_adapter *adapter;
++ struct i2c_msg msg = { };
+ size_t i;
+ int ret = -EOPNOTSUPP;
+ u8 attr[] = {
+@@ -85,48 +86,48 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
+ goto bad;
+ }
+
+- client.adapter = i2c_get_adapter(params[0].u.value.b);
+- if (!client.adapter)
++ adapter = i2c_get_adapter(params[0].u.value.b);
++ if (!adapter)
+ goto bad;
+
+ if (params[1].u.value.a & OPTEE_MSG_RPC_CMD_I2C_FLAGS_TEN_BIT) {
+- if (!i2c_check_functionality(client.adapter,
++ if (!i2c_check_functionality(adapter,
+ I2C_FUNC_10BIT_ADDR)) {
+- i2c_put_adapter(client.adapter);
++ i2c_put_adapter(adapter);
+ goto bad;
+ }
+
+- client.flags = I2C_CLIENT_TEN;
++ msg.flags = I2C_M_TEN;
+ }
+
+- client.addr = params[0].u.value.c;
+- snprintf(client.name, I2C_NAME_SIZE, "i2c%d", client.adapter->nr);
++ msg.addr = params[0].u.value.c;
++ msg.buf = params[2].u.memref.shm->kaddr;
++ msg.len = params[2].u.memref.size;
+
+ switch (params[0].u.value.a) {
+ case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_RD:
+- ret = i2c_master_recv(&client, params[2].u.memref.shm->kaddr,
+- params[2].u.memref.size);
++ msg.flags |= I2C_M_RD;
+ break;
+ case OPTEE_MSG_RPC_CMD_I2C_TRANSFER_WR:
+- ret = i2c_master_send(&client, params[2].u.memref.shm->kaddr,
+- params[2].u.memref.size);
+ break;
+ default:
+- i2c_put_adapter(client.adapter);
++ i2c_put_adapter(adapter);
+ goto bad;
+ }
+
++ ret = i2c_transfer(adapter, &msg, 1);
++
+ if (ret < 0) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ } else {
+- params[3].u.value.a = ret;
++ params[3].u.value.a = msg.len;
+ if (optee_to_msg_param(arg->params, arg->num_params, params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ else
+ arg->ret = TEEC_SUCCESS;
+ }
+
+- i2c_put_adapter(client.adapter);
++ i2c_put_adapter(adapter);
+ kfree(params);
+ return;
+ bad:
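
The OP-TEE hunk above drops the fake on-stack i2c_client in favour of a bare struct i2c_msg handed to i2c_transfer(), so the RPC handler no longer has to conjure up a client device just to move bytes. A minimal sketch of that pattern, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/i2c.h>

/* One read or write against a bare adapter; no i2c_client involved. */
static int demo_i2c_xfer(struct i2c_adapter *adapter, u16 addr,
                         u8 *buf, u16 len, bool read)
{
        struct i2c_msg msg = {
                .addr  = addr,
                .flags = read ? I2C_M_RD : 0,
                .buf   = buf,
                .len   = len,
        };
        int ret;

        /* i2c_transfer() returns the number of messages completed. */
        ret = i2c_transfer(adapter, &msg, 1);
        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EIO;
}

This is also why the hunk reports msg.len back to the caller instead of the return value: i2c_transfer() counts messages, not bytes.
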
+diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
+index 612f063c1cfcd..ddc166e3a93eb 100644
+--- a/drivers/thermal/cpufreq_cooling.c
++++ b/drivers/thermal/cpufreq_cooling.c
+@@ -441,7 +441,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
+ frequency = get_state_freq(cpufreq_cdev, state);
+
+ ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
+- if (ret > 0) {
++ if (ret >= 0) {
+ cpufreq_cdev->cpufreq_state = state;
+ cpus = cpufreq_cdev->policy->cpus;
+ max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
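
The cooling fix above hinges on the return convention of freq_qos_update_request(): 1 means the aggregate constraint changed, 0 means the request was applied without changing the effective value, and a negative errno means failure, so only ret < 0 may skip the state update. A hedged sketch of the corrected pattern:

#include <linux/pm_qos.h>

/* Apply a frequency cap; any non-negative return is success. */
static int demo_update_cap(struct freq_qos_request *req, s32 freq)
{
        int ret = freq_qos_update_request(req, freq);

        /* 1: aggregate changed, 0: applied but unchanged, <0: error */
        return ret < 0 ? ret : 0;
}
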
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index c676fa89ee0b6..51dafc06f5414 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2559,7 +2559,8 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
+ */
+
+ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t nr)
++ unsigned char *buf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 12557ee1edb68..1363e659dc1db 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -416,13 +416,19 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
+ * Returns the number of bytes returned or error code.
+ */
+ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+- __u8 __user *buf, size_t nr)
++ __u8 *kbuf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ struct n_hdlc *n_hdlc = tty->disc_data;
+ int ret = 0;
+ struct n_hdlc_buf *rbuf;
+ DECLARE_WAITQUEUE(wait, current);
+
++ /* Is this a repeated call for an rbuf we already found earlier? */
++ rbuf = *cookie;
++ if (rbuf)
++ goto have_rbuf;
++
+ add_wait_queue(&tty->read_wait, &wait);
+
+ for (;;) {
+@@ -436,25 +442,8 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
+- if (rbuf) {
+- if (rbuf->count > nr) {
+- /* too large for caller's buffer */
+- ret = -EOVERFLOW;
+- } else {
+- __set_current_state(TASK_RUNNING);
+- if (copy_to_user(buf, rbuf->buf, rbuf->count))
+- ret = -EFAULT;
+- else
+- ret = rbuf->count;
+- }
+-
+- if (n_hdlc->rx_free_buf_list.count >
+- DEFAULT_RX_BUF_COUNT)
+- kfree(rbuf);
+- else
+- n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
++ if (rbuf)
+ break;
+- }
+
+ /* no data */
+ if (tty_io_nonblock(tty, file)) {
+@@ -473,6 +462,39 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ remove_wait_queue(&tty->read_wait, &wait);
+ __set_current_state(TASK_RUNNING);
+
++ if (!rbuf)
++ return ret;
++ *cookie = rbuf;
++
++have_rbuf:
++ /* Have we used it up entirely? */
++ if (offset >= rbuf->count)
++ goto done_with_rbuf;
++
++ /* More data to go, but can't copy any more? EOVERFLOW */
++ ret = -EOVERFLOW;
++ if (!nr)
++ goto done_with_rbuf;
++
++ /* Copy as much data as possible */
++ ret = rbuf->count - offset;
++ if (ret > nr)
++ ret = nr;
++ memcpy(kbuf, rbuf->buf+offset, ret);
++ offset += ret;
++
++ /* If we still have data left, we leave the rbuf in the cookie */
++ if (offset < rbuf->count)
++ return ret;
++
++done_with_rbuf:
++ *cookie = NULL;
++
++ if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
++ kfree(rbuf);
++ else
++ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
++
+ return ret;
+
+ } /* end of n_hdlc_tty_read() */
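
The rework above is the clearest illustration of the new line-discipline read contract used throughout this patch (see the tty_io.c hunk further down): the first call parks a record in *cookie, and the core keeps calling back with an advancing offset until the record is consumed and the cookie cleared. A stripped-down sketch of that contract; the record type and producer callback are hypothetical stand-ins for struct n_hdlc_buf and n_hdlc_buf_get():

#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical record type standing in for struct n_hdlc_buf. */
struct demo_rec {
        size_t len;
        unsigned char data[];
};

static ssize_t demo_cookie_read(void **cookie, unsigned long offset,
                                unsigned char *kbuf, size_t nr,
                                struct demo_rec *(*next_rec)(void))
{
        struct demo_rec *rec = *cookie;
        size_t n;

        if (!rec) {                     /* first call: fetch a record */
                rec = next_rec();
                if (!rec)
                        return 0;       /* nothing queued, stop iterating */
                *cookie = rec;
        }

        if (offset >= rec->len) {       /* consumed on a prior call */
                *cookie = NULL;
                kfree(rec);
                return 0;
        }

        n = min_t(size_t, nr, rec->len - offset);
        memcpy(kbuf, rec->data + offset, n);

        if (offset + n == rec->len) {   /* done: release record and cookie */
                *cookie = NULL;
                kfree(rec);
        }
        return n;
}
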
+diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
+index 96feabae47407..ce03ae78f5c6a 100644
+--- a/drivers/tty/n_null.c
++++ b/drivers/tty/n_null.c
+@@ -20,7 +20,8 @@ static void n_null_close(struct tty_struct *tty)
+ }
+
+ static ssize_t n_null_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user * buf, size_t nr)
++ unsigned char *buf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
+index 934dd2fb2ec80..3161f0a535e37 100644
+--- a/drivers/tty/n_r3964.c
++++ b/drivers/tty/n_r3964.c
+@@ -129,7 +129,7 @@ static void remove_client_block(struct r3964_info *pInfo,
+ static int r3964_open(struct tty_struct *tty);
+ static void r3964_close(struct tty_struct *tty);
+ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user * buf, size_t nr);
++ void *cookie, unsigned char *buf, size_t nr);
+ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr);
+ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
+@@ -1058,7 +1058,8 @@ static void r3964_close(struct tty_struct *tty)
+ }
+
+ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user * buf, size_t nr)
++ unsigned char *kbuf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ struct r3964_info *pInfo = tty->disc_data;
+ struct r3964_client_info *pClient;
+@@ -1109,10 +1110,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
+ kfree(pMsg);
+ TRACE_M("r3964_read - msg kfree %p", pMsg);
+
+- if (copy_to_user(buf, &theMsg, ret)) {
+- ret = -EFAULT;
+- goto unlock;
+- }
++ memcpy(kbuf, &theMsg, ret);
+
+ TRACE_PS("read - return %d", ret);
+ goto unlock;
+diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c
+index 4479af4d2fa5c..3490ed51b1a3c 100644
+--- a/drivers/tty/n_tracerouter.c
++++ b/drivers/tty/n_tracerouter.c
+@@ -118,7 +118,9 @@ static void n_tracerouter_close(struct tty_struct *tty)
+ * -EINVAL
+ */
+ static ssize_t n_tracerouter_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t nr) {
++ unsigned char *buf, size_t nr,
++ void **cookie, unsigned long offset)
++{
+ return -EINVAL;
+ }
+
+diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c
+index d96ba82cc3569..1d9931041fd8b 100644
+--- a/drivers/tty/n_tracesink.c
++++ b/drivers/tty/n_tracesink.c
+@@ -115,7 +115,9 @@ static void n_tracesink_close(struct tty_struct *tty)
+ * -EINVAL
+ */
+ static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t nr) {
++ unsigned char *buf, size_t nr,
++ void **cookie, unsigned long offset)
++{
+ return -EINVAL;
+ }
+
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 219e85756171b..0bd32ae8a269d 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -164,29 +164,24 @@ static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
+ memset(buffer, 0x00, size);
+ }
+
+-static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
+- size_t tail, size_t n)
++static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t size = N_TTY_BUF_SIZE - tail;
+ void *from = read_buf_addr(ldata, tail);
+- int uncopied;
+
+ if (n > size) {
+ tty_audit_add_data(tty, from, size);
+- uncopied = copy_to_user(to, from, size);
+- zero_buffer(tty, from, size - uncopied);
+- if (uncopied)
+- return uncopied;
++ memcpy(to, from, size);
++ zero_buffer(tty, from, size);
+ to += size;
+ n -= size;
+ from = ldata->read_buf;
+ }
+
+ tty_audit_add_data(tty, from, n);
+- uncopied = copy_to_user(to, from, n);
+- zero_buffer(tty, from, n - uncopied);
+- return uncopied;
++ memcpy(to, from, n);
++ zero_buffer(tty, from, n);
+ }
+
+ /**
+@@ -1944,15 +1939,16 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ /**
+ * copy_from_read_buf - copy read data directly
+ * @tty: terminal device
+- * @b: user data
++ * @kbp: data
+ * @nr: size of data
+ *
+ * Helper function to speed up n_tty_read. It is only called when
+- * ICANON is off; it copies characters straight from the tty queue to
+- * user space directly. It can be profitably called twice; once to
+- * drain the space from the tail pointer to the (physical) end of the
+- * buffer, and once to drain the space from the (physical) beginning of
+- * the buffer to head pointer.
++ * ICANON is off; it copies characters straight from the tty queue.
++ *
++ * It can be profitably called twice; once to drain the space from
++ * the tail pointer to the (physical) end of the buffer, and once
++ * to drain the space from the (physical) beginning of the buffer
++ * to head pointer.
+ *
+ * Called under the ldata->atomic_read_lock sem
+ *
+@@ -1962,7 +1958,7 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ */
+
+ static int copy_from_read_buf(struct tty_struct *tty,
+- unsigned char __user **b,
++ unsigned char **kbp,
+ size_t *nr)
+
+ {
+@@ -1978,8 +1974,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ n = min(*nr, n);
+ if (n) {
+ unsigned char *from = read_buf_addr(ldata, tail);
+- retval = copy_to_user(*b, from, n);
+- n -= retval;
++ memcpy(*kbp, from, n);
+ is_eof = n == 1 && *from == EOF_CHAR(tty);
+ tty_audit_add_data(tty, from, n);
+ zero_buffer(tty, from, n);
+@@ -1988,7 +1983,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
+ (head == ldata->read_tail))
+ n = 0;
+- *b += n;
++ *kbp += n;
+ *nr -= n;
+ }
+ return retval;
+@@ -1997,12 +1992,12 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ /**
+ * canon_copy_from_read_buf - copy read data in canonical mode
+ * @tty: terminal device
+- * @b: user data
++ * @kbp: data
+ * @nr: size of data
+ *
+ * Helper function for n_tty_read. It is only called when ICANON is on;
+ * it copies one line of input up to and including the line-delimiting
+- * character into the user-space buffer.
++ * character into the result buffer.
+ *
+ * NB: When termios is changed from non-canonical to canonical mode and
+ * the read buffer contains data, n_tty_set_termios() simulates an EOF
+@@ -2018,14 +2013,14 @@ static int copy_from_read_buf(struct tty_struct *tty,
+ */
+
+ static int canon_copy_from_read_buf(struct tty_struct *tty,
+- unsigned char __user **b,
++ unsigned char **kbp,
+ size_t *nr)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t n, size, more, c;
+ size_t eol;
+ size_t tail;
+- int ret, found = 0;
++ int found = 0;
+
+ /* N.B. avoid overrun if nr == 0 */
+ if (!*nr)
+@@ -2061,10 +2056,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
+ n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
+ __func__, eol, found, n, c, tail, more);
+
+- ret = tty_copy_to_user(tty, *b, tail, n);
+- if (ret)
+- return -EFAULT;
+- *b += n;
++ tty_copy(tty, *kbp, tail, n);
++ *kbp += n;
+ *nr -= n;
+
+ if (found)
+@@ -2129,10 +2122,11 @@ static int job_control(struct tty_struct *tty, struct file *file)
+ */
+
+ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t nr)
++ unsigned char *kbuf, size_t nr,
++ void **cookie, unsigned long offset)
+ {
+ struct n_tty_data *ldata = tty->disc_data;
+- unsigned char __user *b = buf;
++ unsigned char *kb = kbuf;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int c;
+ int minimum, time;
+@@ -2178,17 +2172,13 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ /* First test for status change. */
+ if (packet && tty->link->ctrl_status) {
+ unsigned char cs;
+- if (b != buf)
++ if (kb != kbuf)
+ break;
+ spin_lock_irq(&tty->link->ctrl_lock);
+ cs = tty->link->ctrl_status;
+ tty->link->ctrl_status = 0;
+ spin_unlock_irq(&tty->link->ctrl_lock);
+- if (put_user(cs, b)) {
+- retval = -EFAULT;
+- break;
+- }
+- b++;
++ *kb++ = cs;
+ nr--;
+ break;
+ }
+@@ -2231,24 +2221,20 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ }
+
+ if (ldata->icanon && !L_EXTPROC(tty)) {
+- retval = canon_copy_from_read_buf(tty, &b, &nr);
++ retval = canon_copy_from_read_buf(tty, &kb, &nr);
+ if (retval)
+ break;
+ } else {
+ int uncopied;
+
+ /* Deal with packet mode. */
+- if (packet && b == buf) {
+- if (put_user(TIOCPKT_DATA, b)) {
+- retval = -EFAULT;
+- break;
+- }
+- b++;
++ if (packet && kb == kbuf) {
++ *kb++ = TIOCPKT_DATA;
+ nr--;
+ }
+
+- uncopied = copy_from_read_buf(tty, &b, &nr);
+- uncopied += copy_from_read_buf(tty, &b, &nr);
++ uncopied = copy_from_read_buf(tty, &kb, &nr);
++ uncopied += copy_from_read_buf(tty, &kb, &nr);
+ if (uncopied) {
+ retval = -EFAULT;
+ break;
+@@ -2257,7 +2243,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+
+ n_tty_check_unthrottle(tty);
+
+- if (b - buf >= minimum)
++ if (kb - kbuf >= minimum)
+ break;
+ if (time)
+ timeout = time;
+@@ -2269,8 +2255,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ remove_wait_queue(&tty->read_wait, &wait);
+ mutex_unlock(&ldata->atomic_read_lock);
+
+- if (b - buf)
+- retval = b - buf;
++ if (kb - kbuf)
++ retval = kb - kbuf;
+
+ return retval;
+ }
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index f4de32d3f2afe..6248304a001f4 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -383,17 +383,18 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+
+- if (!desc) {
+- for (i = count; i > 0; i--)
+- stm32_transmit_chars_pio(port);
+- return;
+- }
++ if (!desc)
++ goto fallback_err;
+
+ desc->callback = stm32_tx_dma_complete;
+ desc->callback_param = port;
+
+ /* Push current DMA TX transaction in the pending queue */
+- dmaengine_submit(desc);
++ if (dma_submit_error(dmaengine_submit(desc))) {
++ /* dma not yet started, safe to free resources */
++ dmaengine_terminate_async(stm32port->tx_ch);
++ goto fallback_err;
++ }
+
+ /* Issue pending DMA TX requests */
+ dma_async_issue_pending(stm32port->tx_ch);
+@@ -402,6 +403,11 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ port->icount.tx += count;
++ return;
++
++fallback_err:
++ for (i = count; i > 0; i--)
++ stm32_transmit_chars_pio(port);
+ }
+
+ static void stm32_transmit_chars(struct uart_port *port)
+@@ -1130,7 +1136,11 @@ static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
+ desc->callback_param = NULL;
+
+ /* Push current DMA transaction in the pending queue */
+- dmaengine_submit(desc);
++ ret = dma_submit_error(dmaengine_submit(desc));
++ if (ret) {
++ dmaengine_terminate_sync(stm32port->rx_ch);
++ goto config_err;
++ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(stm32port->rx_ch);
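
Both stm32-usart hunks converge on the same rule: the cookie returned by dmaengine_submit() must be checked with dma_submit_error(), and a failed submit must terminate the channel before falling back to PIO. A condensed sketch, with the PIO fallback left as a placeholder:

#include <linux/dmaengine.h>

static void demo_submit(struct dma_chan *chan,
                        struct dma_async_tx_descriptor *desc,
                        void (*pio_fallback)(void))
{
        if (!desc || dma_submit_error(dmaengine_submit(desc))) {
                if (desc)       /* submitted but rejected: clean up */
                        dmaengine_terminate_async(chan);
                pio_fallback();
                return;
        }

        dma_async_issue_pending(chan);  /* kick off the transfer */
}
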
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 082da38762fc7..623738d8e32c8 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -142,7 +142,7 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
+ /* Mutex to protect creating and releasing a tty */
+ DEFINE_MUTEX(tty_mutex);
+
+-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
++static ssize_t tty_read(struct kiocb *, struct iov_iter *);
+ static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+ static __poll_t tty_poll(struct file *, poll_table *);
+ static int tty_open(struct inode *, struct file *);
+@@ -473,8 +473,9 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
+
+ static const struct file_operations tty_fops = {
+ .llseek = no_llseek,
+- .read = tty_read,
++ .read_iter = tty_read,
+ .write_iter = tty_write,
++ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+@@ -487,8 +488,9 @@ static const struct file_operations tty_fops = {
+
+ static const struct file_operations console_fops = {
+ .llseek = no_llseek,
+- .read = tty_read,
++ .read_iter = tty_read,
+ .write_iter = redirected_tty_write,
++ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .poll = tty_poll,
+ .unlocked_ioctl = tty_ioctl,
+@@ -829,6 +831,65 @@ static void tty_update_time(struct timespec64 *time)
+ time->tv_sec = sec;
+ }
+
++/*
++ * Iterate on the ldisc ->read() function until we've gotten all
++ * the data the ldisc has for us.
++ *
++ * The "cookie" is something that the ldisc read function can fill
++ * in to let us know that there is more data to be had.
++ *
++ * We promise to continue to call the ldisc until it stops returning
++ * data or clears the cookie. The cookie may be something that the
++ * ldisc maintains state for and needs to free.
++ */
++static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
++ struct file *file, struct iov_iter *to)
++{
++ int retval = 0;
++ void *cookie = NULL;
++ unsigned long offset = 0;
++ char kernel_buf[64];
++ size_t count = iov_iter_count(to);
++
++ do {
++ int size, copied;
++
++ size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
++ size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
++ if (!size)
++ break;
++
++ /*
++ * A ldisc read error return will override any previously copied
++ * data (eg -EOVERFLOW from HDLC)
++ */
++ if (size < 0) {
++ memzero_explicit(kernel_buf, sizeof(kernel_buf));
++ return size;
++ }
++
++ copied = copy_to_iter(kernel_buf, size, to);
++ offset += copied;
++ count -= copied;
++
++ /*
++ * If the user copy failed, we still need to do another ->read()
++ * call if we had a cookie to let the ldisc clear up.
++ *
++ * But make sure size is zeroed.
++ */
++ if (unlikely(copied != size)) {
++ count = 0;
++ retval = -EFAULT;
++ }
++ } while (cookie);
++
++	/* We always clear the tty buffer in case it contained passwords */
++ memzero_explicit(kernel_buf, sizeof(kernel_buf));
++ return offset ? offset : retval;
++}
++
++
+ /**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+@@ -844,10 +905,10 @@ static void tty_update_time(struct timespec64 *time)
+ * read calls may be outstanding in parallel.
+ */
+
+-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+- loff_t *ppos)
++static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+ {
+ int i;
++ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct tty_struct *tty = file_tty(file);
+ struct tty_ldisc *ld;
+@@ -860,12 +921,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ /* We want to wait for the line discipline to sort out in this
+ situation */
+ ld = tty_ldisc_ref_wait(tty);
+- if (!ld)
+- return hung_up_tty_read(file, buf, count, ppos);
+- if (ld->ops->read)
+- i = ld->ops->read(tty, file, buf, count);
+- else
+- i = -EIO;
++ i = -EIO;
++ if (ld && ld->ops->read)
++ i = iterate_tty_read(ld, tty, file, to);
+ tty_ldisc_deref(ld);
+
+ if (i > 0)
+@@ -2887,7 +2945,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
+
+ static int this_tty(const void *t, struct file *file, unsigned fd)
+ {
+- if (likely(file->f_op->read != tty_read))
++ if (likely(file->f_op->read_iter != tty_read))
+ return 0;
+ return file_tty(file) != t ? 0 : fd + 1;
+ }
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index e9ac215b96633..fc3269f5faf19 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
+ if (num_packets > max_hc_pkt_count) {
+ num_packets = max_hc_pkt_count;
+ chan->xfer_len = num_packets * chan->max_packet;
++ } else if (chan->ep_is_in) {
++ /*
++ * Always program an integral # of max packets
++ * for IN transfers.
++ * Note: This assumes that the input buffer is
++ * aligned and sized accordingly.
++ */
++ chan->xfer_len = num_packets * chan->max_packet;
+ }
+ } else {
+ /* Need 1 packet for transfer length of 0 */
+ num_packets = 1;
+ }
+
+- if (chan->ep_is_in)
+- /*
+- * Always program an integral # of max packets for IN
+- * transfers
+- */
+- chan->xfer_len = num_packets * chan->max_packet;
+-
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ /*
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index a052d39b4375e..d5f4ec1b73b15 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
+ &short_read);
+
+ if (urb->actual_length + xfer_length > urb->length) {
+- dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
++ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ xfer_length = urb->length - urb->actual_length;
+ }
+
+@@ -1977,6 +1977,18 @@ error:
+ qtd->error_count++;
+ dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+ qtd, DWC2_HC_XFER_XACT_ERR);
++ /*
++ * We can get here after a completed transaction
++ * (urb->actual_length >= urb->length) which was not reported
++ * as completed. If that is the case, and we do not abort
++ * the transfer, a transfer of size 0 will be enqueued
++ * subsequently. If urb->actual_length is not DMA-aligned,
++ * the buffer will then point to an unaligned address, and
++ * the resulting behavior is undefined. Bail out in that
++ * situation.
++ */
++ if (qtd->urb->actual_length >= qtd->urb->length)
++ qtd->error_count = 3;
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index ee44321fee386..56f7235bc068c 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -605,8 +605,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
+ params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+ if (desc->bInterval) {
+- params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+- dep->interval = 1 << (desc->bInterval - 1);
++ u8 bInterval_m1;
++
++ /*
++ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
++ * must be set to 0 when the controller operates in full-speed.
++ */
++ bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
++ if (dwc->gadget->speed == USB_SPEED_FULL)
++ bInterval_m1 = 0;
++
++ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
++ dwc->gadget->speed == USB_SPEED_FULL)
++ dep->interval = desc->bInterval;
++ else
++ dep->interval = 1 << (desc->bInterval - 1);
++
++ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
+ }
+
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
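
The dwc3 change separates two encodings of an endpoint's bInterval: the DEPCFG register field stores bInterval - 1 clamped to 13 (and must be 0 at full speed), while full-speed interrupt endpoints count their service interval linearly in frames rather than as a power of two. A small sketch of the arithmetic, with a hypothetical helper name:

#include <linux/minmax.h>
#include <linux/types.h>

static void demo_interval(u8 bInterval, bool full_speed, bool is_int_ep,
                          u8 *bInterval_m1, u32 *interval)
{
        /* Hardware field accepts 0..13 and requires 0 at full speed. */
        *bInterval_m1 = full_speed ? 0 : min_t(u8, bInterval - 1, 13);

        if (is_int_ep && full_speed)
                *interval = bInterval;             /* linear, in frames */
        else
                *interval = 1U << (bInterval - 1); /* 2^(bInterval-1) */
}
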
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index e6d32c5367812..908e49dafd620 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
+ struct snd_uac_chip *uac = prm->uac;
+
+ /* i/f shutting down */
+- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
++ if (!prm->ep_enabled) {
++ usb_ep_free_request(ep, req);
++ return;
++ }
++
++ if (req->status == -ESHUTDOWN)
+ return;
+
+ /*
+@@ -336,8 +341,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+
+ for (i = 0; i < params->req_number; i++) {
+ if (prm->ureq[i].req) {
+- usb_ep_dequeue(ep, prm->ureq[i].req);
+- usb_ep_free_request(ep, prm->ureq[i].req);
++ if (usb_ep_dequeue(ep, prm->ureq[i].req))
++ usb_ep_free_request(ep, prm->ureq[i].req);
++ /*
++ * If usb_ep_dequeue() cannot successfully dequeue the
++ * request, the request will be freed by the completion
++ * callback.
++ */
++
+ prm->ureq[i].req = NULL;
+ }
+ }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 94398f89e600d..4168801b95955 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1386,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
+ index_value = get_ftdi_divisor(tty, port);
+ value = (u16)index_value;
+ index = (u16)(index_value >> 16);
+- if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
+- (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
++ if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
++ priv->chip_type == FT4232H || priv->chip_type == FT232H ||
++ priv->chip_type == FTX) {
+ /* Probably the BM type needs the MSB of the encoded fractional
+ * divider also moved like for the chips above. Any infos? */
+ index = (u16)((index << 8) | priv->interface);
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 41ee2984a0dff..785e975819278 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -1092,8 +1092,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (urb->transfer_buffer == NULL) {
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
+- if (!urb->transfer_buffer)
++ if (!urb->transfer_buffer) {
++ bytes_sent = -ENOMEM;
+ goto exit;
++ }
+ }
+ transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
+
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 23f91d658cb46..30c25ef0dacd2 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -883,8 +883,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (urb->transfer_buffer == NULL) {
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
+- if (!urb->transfer_buffer)
++ if (!urb->transfer_buffer) {
++ bytes_sent = -ENOMEM;
+ goto exit;
++ }
+ }
+ transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2049e66f34a3f..c6969ca728390 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1569,7 +1569,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
++ { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */
++ .driver_info = RSVD(3) | RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index be8067017eaa5..29dda60e3bcde 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -183,6 +183,7 @@ struct pl2303_type_data {
+ speed_t max_baud_rate;
+ unsigned long quirks;
+ unsigned int no_autoxonxoff:1;
++ unsigned int no_divisors:1;
+ };
+
+ struct pl2303_serial_private {
+@@ -209,6 +210,7 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
+ },
+ [TYPE_HXN] = {
+ .max_baud_rate = 12000000,
++ .no_divisors = true,
+ },
+ };
+
+@@ -571,8 +573,12 @@ static void pl2303_encode_baud_rate(struct tty_struct *tty,
+ baud = min_t(speed_t, baud, spriv->type->max_baud_rate);
+ /*
+ * Use direct method for supported baud rates, otherwise use divisors.
++ * Newer chip types do not support divisor encoding.
+ */
+- baud_sup = pl2303_get_supported_baud_rate(baud);
++ if (spriv->type->no_divisors)
++ baud_sup = baud;
++ else
++ baud_sup = pl2303_get_supported_baud_rate(baud);
+
+ if (baud == baud_sup)
+ baud = pl2303_encode_baud_rate_direct(buf, baud);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index b5fe6d2ad22f5..25fd971be63f7 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -1820,7 +1820,7 @@ static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+- if (offset + len < sizeof(struct virtio_net_config))
++ if (offset + len <= sizeof(struct virtio_net_config))
+ memcpy(buf, (u8 *)&ndev->config + offset, len);
+ }
+
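
The mlx5_vdpa fix is a boundary condition: a read ending exactly at sizeof(struct virtio_net_config) is legitimate, so the guard must be <=, not <. In general form (a sketch, not the in-tree code, written to also sidestep overflow in offset + len):

#include <linux/types.h>

static bool demo_in_bounds(size_t offset, size_t len, size_t total)
{
        /* Equivalent to offset + len <= total, minus the wraparound. */
        return offset <= total && len <= total - offset;
}
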
+diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
+index 2296856340311..1bb7edac56899 100644
+--- a/drivers/vfio/pci/vfio_pci_zdev.c
++++ b/drivers/vfio/pci/vfio_pci_zdev.c
+@@ -74,6 +74,8 @@ static int zpci_util_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ int ret;
+
+ cap = kmalloc(cap_size, GFP_KERNEL);
++ if (!cap)
++ return -ENOMEM;
+
+ cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_UTIL;
+ cap->header.version = 1;
+@@ -98,6 +100,8 @@ static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_pci_device *vdev,
+ int ret;
+
+ cap = kmalloc(cap_size, GFP_KERNEL);
++ if (!cap)
++ return -ENOMEM;
+
+ cap->header.id = VFIO_DEVICE_INFO_CAP_ZPCI_PFIP;
+ cap->header.version = 1;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 0b4dedaa91289..78bd28873945a 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -236,6 +236,18 @@ static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
+ }
+ }
+
++static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
++{
++ struct rb_node *n;
++ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
++
++ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
++ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
++
++ bitmap_set(dma->bitmap, 0, dma->size >> pgshift);
++ }
++}
++
+ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
+ {
+ struct rb_node *n;
+@@ -945,6 +957,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+
+ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ {
++ WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
+ vfio_unmap_unpin(iommu, dma, true);
+ vfio_unlink_dma(iommu, dma);
+ put_task_struct(dma->task);
+@@ -2238,23 +2251,6 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
+ }
+ }
+
+-static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
+-{
+- struct rb_node *n;
+-
+- n = rb_first(&iommu->dma_list);
+- for (; n; n = rb_next(n)) {
+- struct vfio_dma *dma;
+-
+- dma = rb_entry(n, struct vfio_dma, node);
+-
+- if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
+- break;
+- }
+- /* mdev vendor driver must unregister notifier */
+- WARN_ON(iommu->notifier.head);
+-}
+-
+ /*
+ * Called when a domain is removed in detach. It is possible that
+ * the removed domain decided the iova aperture window. Modify the
+@@ -2354,10 +2350,10 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
+ kfree(group);
+
+ if (list_empty(&iommu->external_domain->group_list)) {
+- vfio_sanity_check_pfn_list(iommu);
+-
+- if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
++ if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
++ WARN_ON(iommu->notifier.head);
+ vfio_iommu_unmap_unpin_all(iommu);
++ }
+
+ kfree(iommu->external_domain);
+ iommu->external_domain = NULL;
+@@ -2391,10 +2387,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
+ */
+ if (list_empty(&domain->group_list)) {
+ if (list_is_singular(&iommu->domain_list)) {
+- if (!iommu->external_domain)
++ if (!iommu->external_domain) {
++ WARN_ON(iommu->notifier.head);
+ vfio_iommu_unmap_unpin_all(iommu);
+- else
++ } else {
+ vfio_iommu_unmap_unpin_reaccount(iommu);
++ }
+ }
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+@@ -2415,8 +2413,11 @@ detach_group_done:
+ * Removal of a group without dirty tracking may allow the iommu scope
+ * to be promoted.
+ */
+- if (update_dirty_scope)
++ if (update_dirty_scope) {
+ update_pinned_page_dirty_scope(iommu);
++ if (iommu->dirty_page_tracking)
++ vfio_iommu_populate_bitmap_full(iommu);
++ }
+ mutex_unlock(&iommu->lock);
+ }
+
+@@ -2475,7 +2476,6 @@ static void vfio_iommu_type1_release(void *iommu_data)
+
+ if (iommu->external_domain) {
+ vfio_release_domain(iommu->external_domain, true);
+- vfio_sanity_check_pfn_list(iommu);
+ kfree(iommu->external_domain);
+ }
+
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index cfb7f5612ef0f..4f02db65dedec 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1269,6 +1269,7 @@ config FB_ATY
+ select FB_CFB_IMAGEBLIT
+ select FB_BACKLIGHT if FB_ATY_BACKLIGHT
+ select FB_MACMODES if PPC
++ select FB_ATY_CT if SPARC64 && PCI
+ help
+ This driver supports graphics boards with the ATI Mach64 chips.
+ Say Y if you have such a graphics board.
+@@ -1279,7 +1280,6 @@ config FB_ATY
+ config FB_ATY_CT
+ bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
+ depends on PCI && FB_ATY
+- default y if SPARC64 && PCI
+ help
+ Say Y here to support use of ATI's 64-bit Rage boards (or other
+ boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
+diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
+index ea05af41ec69e..8d195e3f83012 100644
+--- a/drivers/virt/vboxguest/vboxguest_utils.c
++++ b/drivers/virt/vboxguest/vboxguest_utils.c
+@@ -468,7 +468,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
+ * Cancellation fun.
+ */
+ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+- u32 timeout_ms, bool *leak_it)
++ u32 timeout_ms, bool interruptible, bool *leak_it)
+ {
+ int rc, cancel_rc, ret;
+ long timeout;
+@@ -495,10 +495,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+ else
+ timeout = msecs_to_jiffies(timeout_ms);
+
+- timeout = wait_event_interruptible_timeout(
+- gdev->hgcm_wq,
+- hgcm_req_done(gdev, &call->header),
+- timeout);
++ if (interruptible) {
++ timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
++ hgcm_req_done(gdev, &call->header),
++ timeout);
++ } else {
++ timeout = wait_event_timeout(gdev->hgcm_wq,
++ hgcm_req_done(gdev, &call->header),
++ timeout);
++ }
+
+ /* timeout > 0 means hgcm_req_done has returned true, so success */
+ if (timeout > 0)
+@@ -631,7 +636,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ hgcm_call_init_call(call, client_id, function, parms, parm_count,
+ bounce_bufs);
+
+- ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
++ ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
++ requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
+ if (ret == 0) {
+ *vbox_status = call->header.result;
+ ret = hgcm_call_copy_back_result(call, parms, parm_count,
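
The vboxguest change routes kernel-mode HGCM calls through wait_event_timeout() so a signal cannot abort them mid-flight; only requestors flagged VMMDEV_REQUESTOR_USERMODE keep the interruptible wait. A schematic of the split, with the completion flag as a placeholder:

#include <linux/wait.h>

static long demo_wait(wait_queue_head_t *wq, bool *done,
                      long timeout, bool interruptible)
{
        if (interruptible)
                return wait_event_interruptible_timeout(*wq, *done, timeout);

        return wait_event_timeout(*wq, *done, timeout);
}
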
+diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
+index 3712b1e6dc71e..976eea28f268a 100644
+--- a/drivers/w1/slaves/w1_therm.c
++++ b/drivers/w1/slaves/w1_therm.c
+@@ -667,28 +667,24 @@ static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
+ */
+ static inline int w1_DS18B20_convert_temp(u8 rom[9])
+ {
+- int t;
+- u32 bv;
++ u16 bv;
++ s16 t;
++
++ /* Signed 16-bit value to unsigned, cpu order */
++ bv = le16_to_cpup((__le16 *)rom);
+
+ /* Config register bit R2 = 1 - GX20MH01 in 13 or 14 bit resolution mode */
+ if (rom[4] & 0x80) {
+- /* Signed 16-bit value to unsigned, cpu order */
+- bv = le16_to_cpup((__le16 *)rom);
+-
+ /* Insert two temperature bits from config register */
+ /* Avoid arithmetic shift of signed value */
+ bv = (bv << 2) | (rom[4] & 3);
+-
+- t = (int) sign_extend32(bv, 17); /* Degrees, lowest bit is 2^-6 */
+- return (t*1000)/64; /* Millidegrees */
++ t = (s16) bv; /* Degrees, lowest bit is 2^-6 */
++ return (int)t * 1000 / 64; /* Sign-extend to int; millidegrees */
+ }
+-
+- t = (int)le16_to_cpup((__le16 *)rom);
+- return t*1000/16;
++ t = (s16)bv; /* Degrees, lowest bit is 2^-4 */
++ return (int)t * 1000 / 16; /* Sign-extend to int; millidegrees */
+ }
+
+-
+-
+ /**
+ * w1_DS18S20_convert_temp() - temperature computation for DS18S20
+ * @rom: data read from device RAM (8 data bytes + 1 CRC byte)
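
The w1_therm rework keeps the raw reading in an s16 so plain integer promotion provides the sign extension: in the GX20MH01 high-resolution mode two config-register bits are appended and the LSB becomes 2^-6 degC (divide by 64), otherwise the LSB is 2^-4 degC (divide by 16). A worked check of the common case with hypothetical raw values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int16_t t_pos = 0x0191;          /* raw counts for +25.0625 degC */
        int16_t t_neg = (int16_t)0xFF5E; /* raw counts for -10.125 degC */

        /* LSB is 2^-4 degC, so millidegrees = t * 1000 / 16. */
        printf("%d\n", (int)t_pos * 1000 / 16); /* prints 25062 */
        printf("%d\n", (int)t_neg * 1000 / 16); /* prints -10125 */
        return 0;
}
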
+diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
+index 1ae03b64ef8bf..9b2173f765c8c 100644
+--- a/drivers/watchdog/intel-mid_wdt.c
++++ b/drivers/watchdog/intel-mid_wdt.c
+@@ -154,6 +154,10 @@ static int mid_wdt_probe(struct platform_device *pdev)
+ watchdog_set_nowayout(wdt_dev, WATCHDOG_NOWAYOUT);
+ watchdog_set_drvdata(wdt_dev, mid);
+
++ mid->scu = devm_intel_scu_ipc_dev_get(dev);
++ if (!mid->scu)
++ return -EPROBE_DEFER;
++
+ ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
+ IRQF_SHARED | IRQF_NO_SUSPEND, "watchdog",
+ wdt_dev);
+@@ -162,10 +166,6 @@ static int mid_wdt_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- mid->scu = devm_intel_scu_ipc_dev_get(dev);
+- if (!mid->scu)
+- return -EPROBE_DEFER;
+-
+ /*
+ * The firmware followed by U-Boot leaves the watchdog running
+ * with the default threshold which may vary. When we get here
+diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
+index 5391bf3e6b11d..c5967d8b4256a 100644
+--- a/drivers/watchdog/mei_wdt.c
++++ b/drivers/watchdog/mei_wdt.c
+@@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt)
+
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ watchdog_stop_on_reboot(&wdt->wdd);
++ watchdog_stop_on_unregister(&wdt->wdd);
+
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret)
+diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
+index 7cf0f2ec649b6..e38a87ffe5f5f 100644
+--- a/drivers/watchdog/qcom-wdt.c
++++ b/drivers/watchdog/qcom-wdt.c
+@@ -22,7 +22,6 @@ enum wdt_reg {
+ };
+
+ #define QCOM_WDT_ENABLE BIT(0)
+-#define QCOM_WDT_ENABLE_IRQ BIT(1)
+
+ static const u32 reg_offset_data_apcs_tmr[] = {
+ [WDT_RST] = 0x38,
+@@ -63,16 +62,6 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
+ return container_of(wdd, struct qcom_wdt, wdd);
+ }
+
+-static inline int qcom_get_enable(struct watchdog_device *wdd)
+-{
+- int enable = QCOM_WDT_ENABLE;
+-
+- if (wdd->pretimeout)
+- enable |= QCOM_WDT_ENABLE_IRQ;
+-
+- return enable;
+-}
+-
+ static irqreturn_t qcom_wdt_isr(int irq, void *arg)
+ {
+ struct watchdog_device *wdd = arg;
+@@ -91,7 +80,7 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
+ writel(1, wdt_addr(wdt, WDT_RST));
+ writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
+- writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN));
++ writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN));
+ return 0;
+ }
+
+diff --git a/fs/affs/namei.c b/fs/affs/namei.c
+index 41c5749f4db78..5400a876d73fb 100644
+--- a/fs/affs/namei.c
++++ b/fs/affs/namei.c
+@@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ return -EIO;
+
+ bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
+- if (!bh_new)
++ if (!bh_new) {
++ affs_brelse(bh_old);
+ return -EIO;
++ }
+
+ /* Remove old header from its parent directory. */
+ affs_lock_dir(old_dir);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 9cadacf3ec275..7ac59a568595a 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -2541,13 +2541,6 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
+ list_del(&edge->list[UPPER]);
+ btrfs_backref_free_edge(cache, edge);
+
+- if (RB_EMPTY_NODE(&upper->rb_node)) {
+- BUG_ON(!list_empty(&node->upper));
+- btrfs_backref_drop_node(cache, node);
+- node = upper;
+- node->lowest = 1;
+- continue;
+- }
+ /*
+ * Add the node to leaf node list if no other child block
+ * cached.
+@@ -2624,7 +2617,7 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
+ /* Only reloc backref cache cares about a specific root */
+ if (cache->is_reloc) {
+ root = find_reloc_root(cache->fs_info, cur->bytenr);
+- if (WARN_ON(!root))
++ if (!root)
+ return -ENOENT;
+ cur->root = root;
+ } else {
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index ff705cc564a9a..17abde7f794ce 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -296,6 +296,9 @@ static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node)
+ {
+ if (node) {
++ ASSERT(list_empty(&node->list));
++ ASSERT(list_empty(&node->lower));
++ ASSERT(node->eb == NULL);
+ cache->nr_nodes--;
+ btrfs_put_root(node->root);
+ kfree(node);
+@@ -340,11 +343,11 @@ static inline void btrfs_backref_drop_node_buffer(
+ static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
+ struct btrfs_backref_node *node)
+ {
+- BUG_ON(!list_empty(&node->upper));
++ ASSERT(list_empty(&node->upper));
+
+ btrfs_backref_drop_node_buffer(node);
+- list_del(&node->list);
+- list_del(&node->lower);
++ list_del_init(&node->list);
++ list_del_init(&node->lower);
+ if (!RB_EMPTY_NODE(&node->rb_node))
+ rb_erase(&node->rb_node, &tree->rb_root);
+ btrfs_backref_free_node(tree, node);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 48ebc106a606c..3b1c387375a6b 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1371,9 +1371,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ btrfs_space_info_update_bytes_pinned(fs_info, space_info,
+ -block_group->pinned);
+ space_info->bytes_readonly += block_group->pinned;
+- percpu_counter_add_batch(&space_info->total_bytes_pinned,
+- -block_group->pinned,
+- BTRFS_TOTAL_BYTES_PINNED_BATCH);
++ __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
+ block_group->pinned = 0;
+
+ spin_unlock(&block_group->lock);
+@@ -2564,8 +2562,10 @@ again:
+
+ if (!path) {
+ path = btrfs_alloc_path();
+- if (!path)
+- return -ENOMEM;
++ if (!path) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ }
+
+ /*
+@@ -2659,16 +2659,14 @@ again:
+ btrfs_put_block_group(cache);
+ if (drop_reserve)
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+-
+- if (ret)
+- break;
+-
+ /*
+ * Avoid blocking other tasks for too long. It might even save
+ * us from writing caches for block groups that are going to be
+ * removed.
+ */
+ mutex_unlock(&trans->transaction->cache_write_mutex);
++ if (ret)
++ goto out;
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ }
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+@@ -2692,7 +2690,12 @@ again:
+ goto again;
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+- } else if (ret < 0) {
++ }
++out:
++ if (ret < 0) {
++ spin_lock(&cur_trans->dirty_bgs_lock);
++ list_splice_init(&dirty, &cur_trans->dirty_bgs);
++ spin_unlock(&cur_trans->dirty_bgs_lock);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
+ }
+
+@@ -2896,10 +2899,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+
+- percpu_counter_add_batch(
+- &cache->space_info->total_bytes_pinned,
+- num_bytes,
+- BTRFS_TOTAL_BYTES_PINNED_BATCH);
++ __btrfs_mod_total_bytes_pinned(cache->space_info,
++ num_bytes);
+ set_extent_dirty(&trans->transaction->pinned_extents,
+ bytenr, bytenr + num_bytes - 1,
+ GFP_NOFS | __GFP_NOFAIL);
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index cc89b63d65a4d..40bf27a65c5d5 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -221,9 +221,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ ret = btrfs_inc_ref(trans, root, cow, 1);
+ else
+ ret = btrfs_inc_ref(trans, root, cow, 0);
+-
+- if (ret)
++ if (ret) {
++ btrfs_tree_unlock(cow);
++ free_extent_buffer(cow);
++ btrfs_abort_transaction(trans, ret);
+ return ret;
++ }
+
+ btrfs_mark_buffer_dirty(cow);
+ *cow_ret = cow;
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 353cc2994d106..30883b9a26d84 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -648,12 +648,12 @@ inserted:
+ */
+ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *existing,
+- struct btrfs_delayed_ref_head *update,
+- int *old_ref_mod_ret)
++ struct btrfs_delayed_ref_head *update)
+ {
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
++ u64 flags = btrfs_ref_head_to_space_flags(existing);
+ int old_ref_mod;
+
+ BUG_ON(existing->is_data != update->is_data);
+@@ -701,8 +701,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
+ * currently, for refs we just added we know we're a-ok.
+ */
+ old_ref_mod = existing->total_ref_mod;
+- if (old_ref_mod_ret)
+- *old_ref_mod_ret = old_ref_mod;
+ existing->ref_mod += update->ref_mod;
+ existing->total_ref_mod += update->ref_mod;
+
+@@ -724,6 +722,27 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
+ trans->delayed_ref_updates += csum_leaves;
+ }
+ }
++
++ /*
++ * This handles the following conditions:
++ *
++ * 1. We had a ref mod of 0 or more and went negative, indicating that
++ * we may be freeing space, so add our space to the
++ * total_bytes_pinned counter.
++ * 2. We were negative and went to 0 or positive, so no longer can say
++ * that the space would be pinned, decrement our counter from the
++ * total_bytes_pinned counter.
++ * 3. We are now at 0 and have ->must_insert_reserved set, which means
++ * this was a new allocation and then we dropped it, and thus must
++ * add our space to the total_bytes_pinned counter.
++ */
++ if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
++ btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
++ else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
++ btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
++ else if (existing->total_ref_mod == 0 && existing->must_insert_reserved)
++ btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
++
+ spin_unlock(&existing->lock);
+ }
+
+@@ -798,8 +817,7 @@ static noinline struct btrfs_delayed_ref_head *
+ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+- int action, int *qrecord_inserted_ret,
+- int *old_ref_mod, int *new_ref_mod)
++ int action, int *qrecord_inserted_ret)
+ {
+ struct btrfs_delayed_ref_head *existing;
+ struct btrfs_delayed_ref_root *delayed_refs;
+@@ -821,8 +839,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ existing = htree_insert(&delayed_refs->href_root,
+ &head_ref->href_node);
+ if (existing) {
+- update_existing_head_ref(trans, existing, head_ref,
+- old_ref_mod);
++ update_existing_head_ref(trans, existing, head_ref);
+ /*
+ * we've updated the existing ref, free the newly
+ * allocated ref
+@@ -830,14 +847,17 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+ head_ref = existing;
+ } else {
+- if (old_ref_mod)
+- *old_ref_mod = 0;
++ u64 flags = btrfs_ref_head_to_space_flags(head_ref);
++
+ if (head_ref->is_data && head_ref->ref_mod < 0) {
+ delayed_refs->pending_csums += head_ref->num_bytes;
+ trans->delayed_ref_updates +=
+ btrfs_csum_bytes_to_leaves(trans->fs_info,
+ head_ref->num_bytes);
+ }
++ if (head_ref->ref_mod < 0)
++ btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
++ head_ref->num_bytes);
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
+ atomic_inc(&delayed_refs->num_entries);
+@@ -845,8 +865,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ }
+ if (qrecord_inserted_ret)
+ *qrecord_inserted_ret = qrecord_inserted;
+- if (new_ref_mod)
+- *new_ref_mod = head_ref->total_ref_mod;
+
+ return head_ref;
+ }
+@@ -909,8 +927,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
+ */
+ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+- struct btrfs_delayed_extent_op *extent_op,
+- int *old_ref_mod, int *new_ref_mod)
++ struct btrfs_delayed_extent_op *extent_op)
+ {
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_tree_ref *ref;
+@@ -977,8 +994,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(trans, head_ref, record,
+- action, &qrecord_inserted,
+- old_ref_mod, new_ref_mod);
++ action, &qrecord_inserted);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+@@ -1006,8 +1022,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ */
+ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+- u64 reserved, int *old_ref_mod,
+- int *new_ref_mod)
++ u64 reserved)
+ {
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_data_ref *ref;
+@@ -1073,8 +1088,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(trans, head_ref, record,
+- action, &qrecord_inserted,
+- old_ref_mod, new_ref_mod);
++ action, &qrecord_inserted);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+@@ -1117,7 +1131,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+ spin_lock(&delayed_refs->lock);
+
+ add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
+- NULL, NULL, NULL);
++ NULL);
+
+ spin_unlock(&delayed_refs->lock);
+
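The three-way adjustment in update_existing_head_ref() above is easier to see in isolation. Below is a minimal userspace sketch of the same sign-transition logic; `struct head` and the global `pinned` stand in for the delayed ref head and space_info->total_bytes_pinned, and every name is illustrative rather than the kernel's.

#include <stdio.h>

struct head {
	long total_ref_mod;
	long num_bytes;
	int must_insert_reserved;
};

static long pinned;	/* stands in for space_info->total_bytes_pinned */

static void apply_mod(struct head *h, long mod)
{
	long old = h->total_ref_mod;

	h->total_ref_mod += mod;
	if (h->total_ref_mod < 0 && old >= 0)
		pinned += h->num_bytes;		/* 1: may be freeing space */
	else if (h->total_ref_mod >= 0 && old < 0)
		pinned -= h->num_bytes;		/* 2: no longer freeing */
	else if (h->total_ref_mod == 0 && h->must_insert_reserved)
		pinned += h->num_bytes;		/* 3: new alloc, then dropped */
}

int main(void)
{
	struct head h = { .num_bytes = 4096 };

	apply_mod(&h, -1);	/* 0 -> -1: count the bytes as pinned */
	apply_mod(&h, 1);	/* -1 -> 0: stop counting them */
	printf("pinned=%ld\n", pinned);	/* prints pinned=0 */
	return 0;
}

The else-if chain matters: a head that swings from negative to nonnegative in one update must take case 2 and never also fall through to the must_insert_reserved case.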
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 1c977e6d45dc3..3ba140468f126 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -326,6 +326,16 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
+ }
+ }
+
++static inline u64 btrfs_ref_head_to_space_flags(
++ struct btrfs_delayed_ref_head *head_ref)
++{
++ if (head_ref->is_data)
++ return BTRFS_BLOCK_GROUP_DATA;
++ else if (head_ref->is_system)
++ return BTRFS_BLOCK_GROUP_SYSTEM;
++ return BTRFS_BLOCK_GROUP_METADATA;
++}
++
+ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
+ {
+ if (refcount_dec_and_test(&head->refs))
+@@ -334,12 +344,10 @@ static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *hea
+
+ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+- struct btrfs_delayed_extent_op *extent_op,
+- int *old_ref_mod, int *new_ref_mod);
++ struct btrfs_delayed_extent_op *extent_op);
+ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+- u64 reserved, int *old_ref_mod,
+- int *new_ref_mod);
++ u64 reserved);
+ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+ u64 bytenr, u64 num_bytes,
+ struct btrfs_delayed_extent_op *extent_op);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0c335dae5af7a..6f0c59debc2b3 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -82,41 +82,6 @@ void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
+ EXTENT_UPTODATE);
+ }
+
+-static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
+-{
+- if (ref->type == BTRFS_REF_METADATA) {
+- if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
+- return BTRFS_BLOCK_GROUP_SYSTEM;
+- else
+- return BTRFS_BLOCK_GROUP_METADATA;
+- }
+- return BTRFS_BLOCK_GROUP_DATA;
+-}
+-
+-static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
+- struct btrfs_ref *ref)
+-{
+- struct btrfs_space_info *space_info;
+- u64 flags = generic_ref_to_space_flags(ref);
+-
+- space_info = btrfs_find_space_info(fs_info, flags);
+- ASSERT(space_info);
+- percpu_counter_add_batch(&space_info->total_bytes_pinned, ref->len,
+- BTRFS_TOTAL_BYTES_PINNED_BATCH);
+-}
+-
+-static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
+- struct btrfs_ref *ref)
+-{
+- struct btrfs_space_info *space_info;
+- u64 flags = generic_ref_to_space_flags(ref);
+-
+- space_info = btrfs_find_space_info(fs_info, flags);
+- ASSERT(space_info);
+- percpu_counter_add_batch(&space_info->total_bytes_pinned, -ref->len,
+- BTRFS_TOTAL_BYTES_PINNED_BATCH);
+-}
+-
+ /* simple helper to search for an existing data extent at a given offset */
+ int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
+ {
+@@ -1388,7 +1353,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref)
+ {
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+- int old_ref_mod, new_ref_mod;
+ int ret;
+
+ ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
+@@ -1397,17 +1361,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
+
+ if (generic_ref->type == BTRFS_REF_METADATA)
+- ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
+- NULL, &old_ref_mod, &new_ref_mod);
++ ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
+ else
+- ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
+- &old_ref_mod, &new_ref_mod);
++ ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);
+
+ btrfs_ref_tree_mod(fs_info, generic_ref);
+
+- if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
+- sub_pinned_bytes(fs_info, generic_ref);
+-
+ return ret;
+ }
+
+@@ -1795,34 +1754,28 @@ void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+ {
+ int nr_items = 1; /* Dropping this ref head update. */
+
+- if (head->total_ref_mod < 0) {
+- struct btrfs_space_info *space_info;
+- u64 flags;
++ /*
++ * We had csum deletions accounted for in our delayed refs rsv, we need
++ * to drop the csum leaves for this update from our delayed_refs_rsv.
++ */
++ if (head->total_ref_mod < 0 && head->is_data) {
++ spin_lock(&delayed_refs->lock);
++ delayed_refs->pending_csums -= head->num_bytes;
++ spin_unlock(&delayed_refs->lock);
++ nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
++ }
+
+- if (head->is_data)
+- flags = BTRFS_BLOCK_GROUP_DATA;
+- else if (head->is_system)
+- flags = BTRFS_BLOCK_GROUP_SYSTEM;
+- else
+- flags = BTRFS_BLOCK_GROUP_METADATA;
+- space_info = btrfs_find_space_info(fs_info, flags);
+- ASSERT(space_info);
+- percpu_counter_add_batch(&space_info->total_bytes_pinned,
+- -head->num_bytes,
+- BTRFS_TOTAL_BYTES_PINNED_BATCH);
++ /*
++ * We were dropping refs, or had a new ref and dropped it, and thus must
++ * adjust down our total_bytes_pinned; the space may or may not have
++ * been pinned, but either way it is accounted for properly in the
++ * pinned space by now.
++ */
++ if (head->total_ref_mod < 0 ||
++ (head->total_ref_mod == 0 && head->must_insert_reserved)) {
++ u64 flags = btrfs_ref_head_to_space_flags(head);
+
+- /*
+- * We had csum deletions accounted for in our delayed refs rsv,
+- * we need to drop the csum leaves for this update from our
+- * delayed_refs_rsv.
+- */
+- if (head->is_data) {
+- spin_lock(&delayed_refs->lock);
+- delayed_refs->pending_csums -= head->num_bytes;
+- spin_unlock(&delayed_refs->lock);
+- nr_items += btrfs_csum_bytes_to_leaves(fs_info,
+- head->num_bytes);
+- }
++ btrfs_mod_total_bytes_pinned(fs_info, flags, -head->num_bytes);
+ }
+
+ btrfs_delayed_refs_rsv_release(fs_info, nr_items);
+@@ -2572,8 +2525,7 @@ static int pin_down_extent(struct btrfs_trans_handle *trans,
+ spin_unlock(&cache->lock);
+ spin_unlock(&cache->space_info->lock);
+
+- percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
+- num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
++ __btrfs_mod_total_bytes_pinned(cache->space_info, num_bytes);
+ set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
+ bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
+ return 0;
+@@ -2784,8 +2736,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+ cache->pinned -= len;
+ btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
+ space_info->max_extent_size = 0;
+- percpu_counter_add_batch(&space_info->total_bytes_pinned,
+- -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
++ __btrfs_mod_total_bytes_pinned(space_info, -len);
+ if (cache->ro) {
+ space_info->bytes_readonly += len;
+ readonly = true;
+@@ -3318,7 +3269,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_ref generic_ref = { 0 };
+- int pin = 1;
+ int ret;
+
+ btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
+@@ -3327,13 +3277,9 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ root->root_key.objectid);
+
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+- int old_ref_mod, new_ref_mod;
+-
+ btrfs_ref_tree_mod(fs_info, &generic_ref);
+- ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
+- &old_ref_mod, &new_ref_mod);
++ ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
+ BUG_ON(ret); /* -ENOMEM */
+- pin = old_ref_mod >= 0 && new_ref_mod < 0;
+ }
+
+ if (last_ref && btrfs_header_generation(buf) == trans->transid) {
+@@ -3345,7 +3291,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ goto out;
+ }
+
+- pin = 0;
+ cache = btrfs_lookup_block_group(fs_info, buf->start);
+
+ if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
+@@ -3362,9 +3307,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
+ }
+ out:
+- if (pin)
+- add_pinned_bytes(fs_info, &generic_ref);
+-
+ if (last_ref) {
+ /*
+ * Deleting the buffer, clear the corrupt flag since it doesn't
+@@ -3378,7 +3320,6 @@ out:
+ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ {
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+- int old_ref_mod, new_ref_mod;
+ int ret;
+
+ if (btrfs_is_testing(fs_info))
+@@ -3394,14 +3335,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
+ /* unlocks the pinned mutex */
+ btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
+- old_ref_mod = new_ref_mod = 0;
+ ret = 0;
+ } else if (ref->type == BTRFS_REF_METADATA) {
+- ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
+- &old_ref_mod, &new_ref_mod);
++ ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
+ } else {
+- ret = btrfs_add_delayed_data_ref(trans, ref, 0,
+- &old_ref_mod, &new_ref_mod);
++ ret = btrfs_add_delayed_data_ref(trans, ref, 0);
+ }
+
+ if (!((ref->type == BTRFS_REF_METADATA &&
+@@ -3410,9 +3348,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
+ btrfs_ref_tree_mod(fs_info, ref);
+
+- if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
+- add_pinned_bytes(fs_info, ref);
+-
+ return ret;
+ }
+
+@@ -4528,7 +4463,6 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_key *ins)
+ {
+ struct btrfs_ref generic_ref = { 0 };
+- int ret;
+
+ BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+
+@@ -4536,9 +4470,8 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
+ ins->objectid, ins->offset, 0);
+ btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
+ btrfs_ref_tree_mod(root->fs_info, &generic_ref);
+- ret = btrfs_add_delayed_data_ref(trans, &generic_ref,
+- ram_bytes, NULL, NULL);
+- return ret;
++
++ return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
+ }
+
+ /*
+@@ -4730,8 +4663,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ generic_ref.real_root = root->root_key.objectid;
+ btrfs_init_tree_ref(&generic_ref, level, root_objectid);
+ btrfs_ref_tree_mod(fs_info, &generic_ref);
+- ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
+- extent_op, NULL, NULL);
++ ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
+ if (ret)
+ goto out_free_delayed;
+ }
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 4d8897879c9cb..71d0d14bc18b3 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -775,8 +775,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ while (num_entries) {
+ e = kmem_cache_zalloc(btrfs_free_space_cachep,
+ GFP_NOFS);
+- if (!e)
++ if (!e) {
++ ret = -ENOMEM;
+ goto free_cache;
++ }
+
+ ret = io_ctl_read_entry(&io_ctl, e, &type);
+ if (ret) {
+@@ -785,6 +787,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ }
+
+ if (!e->bytes) {
++ ret = -1;
+ kmem_cache_free(btrfs_free_space_cachep, e);
+ goto free_cache;
+ }
+@@ -805,6 +808,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ e->bitmap = kmem_cache_zalloc(
+ btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ if (!e->bitmap) {
++ ret = -ENOMEM;
+ kmem_cache_free(
+ btrfs_free_space_cachep, e);
+ goto free_cache;
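The three hunks above share one bug class: jumping to a common cleanup label without setting `ret` first, so a stale zero from a previous iteration is returned as success. A contrived standalone sketch of the corrected shape (the numeric errno and all names are made up for illustration):

#include <stdlib.h>

static int load_entries(int n)
{
	int ret = 0;

	while (n--) {
		void *e = calloc(1, 64);

		if (!e) {
			ret = -12;	/* -ENOMEM: set before jumping */
			goto free_cache;
		}
		/* ... populate and link the entry ... */
		free(e);
	}
	return 0;

free_cache:
	/* tear down whatever was built, then report the real error;
	 * before the fix this path could return the stale 0 in ret. */
	return ret;
}

int main(void)
{
	return load_entries(8) ? 1 : 0;
}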
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a8e0a6b038d3e..ad34c5a09befc 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8186,8 +8186,9 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
+
+ if (!inode_evicting)
+ lock_extent_bits(tree, page_start, page_end, &cached_state);
+-again:
++
+ start = page_start;
++again:
+ ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1);
+ if (ordered) {
+ found_ordered = true;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index df63ef64c5c0d..c01e0d7bef2c9 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -668,9 +668,7 @@ static void __del_reloc_root(struct btrfs_root *root)
+ RB_CLEAR_NODE(&node->rb_node);
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+- if (!node)
+- return;
+- BUG_ON((struct btrfs_root *)node->data != root);
++ ASSERT(!node || (struct btrfs_root *)node->data == root);
+ }
+
+ /*
+diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
+index 5646393b928c9..74706f604bce1 100644
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -152,4 +152,21 @@ static inline void btrfs_space_info_free_bytes_may_use(
+ int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+ enum btrfs_reserve_flush_enum flush);
+
++static inline void __btrfs_mod_total_bytes_pinned(
++ struct btrfs_space_info *space_info,
++ s64 mod)
++{
++ percpu_counter_add_batch(&space_info->total_bytes_pinned, mod,
++ BTRFS_TOTAL_BYTES_PINNED_BATCH);
++}
++
++static inline void btrfs_mod_total_bytes_pinned(struct btrfs_fs_info *fs_info,
++ u64 flags, s64 mod)
++{
++ struct btrfs_space_info *space_info = btrfs_find_space_info(fs_info, flags);
++
++ ASSERT(space_info);
++ __btrfs_mod_total_bytes_pinned(space_info, mod);
++}
++
+ #endif /* BTRFS_SPACE_INFO_H */
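The new helpers wrap percpu_counter_add_batch(), whose point is that each CPU accumulates a private delta and only folds it into the shared count once the delta crosses the batch threshold. A rough userspace model of that behaviour, with invented names and a four-slot array standing in for real per-CPU data:

#include <stdio.h>
#include <stdlib.h>

struct pcpu_counter {
	long count;	/* shared, approximate total */
	long local[4];	/* one delta per "CPU" */
	long batch;
};

static void pcpu_add(struct pcpu_counter *c, int cpu, long amount)
{
	c->local[cpu] += amount;
	if (labs(c->local[cpu]) >= c->batch) {
		c->count += c->local[cpu];	/* fold into shared count */
		c->local[cpu] = 0;
	}
}

int main(void)
{
	struct pcpu_counter c = { .batch = 32 };

	pcpu_add(&c, 0, 16);	/* stays in the per-CPU delta */
	pcpu_add(&c, 0, 16);	/* crosses the batch: folded */
	printf("count=%ld local[0]=%ld\n", c.count, c.local[0]);	/* 32 0 */
	return 0;
}

The trade-off is the same one the btrfs helpers inherit: updates are cheap and contention-free, but a read of count can lag the truth by up to nr_cpus * batch.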
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 255a512f1277e..638d18c198ea7 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -3093,10 +3093,12 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
+ dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
+ last ? " last" : "", put ? " put" : "");
+
+- if (last && !skip_checking_caps)
+- ceph_check_caps(ci, 0, NULL);
+- else if (flushsnaps)
+- ceph_flush_snaps(ci, NULL);
++ if (!skip_checking_caps) {
++ if (last)
++ ceph_check_caps(ci, 0, NULL);
++ else if (flushsnaps)
++ ceph_flush_snaps(ci, NULL);
++ }
+ if (wake)
+ wake_up_all(&ci->i_cap_wq);
+ while (put-- > 0)
+diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
+index d35f599aa00e6..f2d730fffccb3 100644
+--- a/fs/cifs/cifs_swn.c
++++ b/fs/cifs/cifs_swn.c
+@@ -272,7 +272,7 @@ static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
+ if (IS_ERR(share_name)) {
+ int ret;
+
+- ret = PTR_ERR(net_name);
++ ret = PTR_ERR(share_name);
+ cifs_dbg(VFS, "%s: failed to extract share name from target '%s': %d\n",
+ __func__, tcon->treeName, ret);
+ kfree(net_name);
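The one-character fix above is a classic copy-paste hazard: IS_ERR() tests one pointer while PTR_ERR() decodes another, so the logged rc is a pointer value rather than an errno. A contrived userspace illustration of the pattern; the IS_ERR/PTR_ERR/ERR_PTR macros below are simplified stand-ins for the kernel's:

#include <stdio.h>

#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-4095)
#define PTR_ERR(p)	((long)(p))
#define ERR_PTR(e)	((void *)(long)(e))

int main(void)
{
	char *net_name = "example.com";		/* valid pointer */
	char *share_name = ERR_PTR(-12);	/* extraction failed: -ENOMEM */

	if (IS_ERR(share_name)) {
		/* the bug logged PTR_ERR(net_name); the fix decodes the
		 * pointer that was actually tested */
		printf("rc=%ld\n", PTR_ERR(share_name));	/* rc=-12 */
	}
	return net_name == NULL;
}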
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 4bb9decbbf27f..1439d3c9ff773 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3038,96 +3038,91 @@ static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it,
+ return 0;
+ }
+
+-static int setup_dfs_tgt_conn(const char *path, const char *full_path,
+- const struct dfs_cache_tgt_iterator *tgt_it,
+- struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
+- unsigned int *xid, struct TCP_Server_Info **server,
+- struct cifs_ses **ses, struct cifs_tcon **tcon)
+-{
+- int rc;
+- struct dfs_info3_param ref = {0};
+- char *mdata = NULL;
+- struct smb3_fs_context fake_ctx = {NULL};
+- char *fake_devname = NULL;
+-
+- cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
+-
+- rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
+- if (rc)
+- return rc;
+-
+- mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
+- full_path + 1, &ref,
+- &fake_devname);
+- free_dfs_info_param(&ref);
+-
+- if (IS_ERR(mdata)) {
+- rc = PTR_ERR(mdata);
+- mdata = NULL;
+- } else
+- rc = cifs_setup_volume_info(&fake_ctx, mdata, fake_devname);
+-
+- kfree(mdata);
+- kfree(fake_devname);
+-
+- if (!rc) {
+- /*
+- * We use a 'fake_ctx' here because we need pass it down to the
+- * mount_{get,put} functions to test connection against new DFS
+- * targets.
+- */
+- mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
+- rc = mount_get_conns(&fake_ctx, cifs_sb, xid, server, ses,
+- tcon);
+- if (!rc || (*server && *ses)) {
+- /*
+- * We were able to connect to new target server.
+- * Update current context with new target server.
+- */
+- rc = update_vol_info(tgt_it, &fake_ctx, ctx);
+- }
+- }
+- smb3_cleanup_fs_context_contents(&fake_ctx);
+- return rc;
+-}
+-
+ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_sb_info *cifs_sb,
+ struct smb3_fs_context *ctx, struct cifs_ses *root_ses,
+ unsigned int *xid, struct TCP_Server_Info **server,
+ struct cifs_ses **ses, struct cifs_tcon **tcon)
+ {
+ int rc;
+- struct dfs_cache_tgt_list tgt_list;
++ struct dfs_cache_tgt_list tgt_list = {0};
+ struct dfs_cache_tgt_iterator *tgt_it = NULL;
++ struct smb3_fs_context tmp_ctx = {NULL};
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ return -EOPNOTSUPP;
+
++ cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, path, full_path);
++
+ rc = dfs_cache_noreq_find(path, NULL, &tgt_list);
+ if (rc)
+ return rc;
++ /*
++ * We use a 'tmp_ctx' here because we need to pass it down to the mount_{get,put} functions to
++ * test connection against new DFS targets.
++ */
++ rc = smb3_fs_context_dup(&tmp_ctx, ctx);
++ if (rc)
++ goto out;
+
+ for (;;) {
++ struct dfs_info3_param ref = {0};
++ char *fake_devname = NULL, *mdata = NULL;
++
+ /* Get next DFS target server - if any */
+ rc = get_next_dfs_tgt(path, &tgt_list, &tgt_it);
+ if (rc)
+ break;
+- /* Connect to next DFS target */
+- rc = setup_dfs_tgt_conn(path, full_path, tgt_it, cifs_sb, ctx, xid, server, ses,
+- tcon);
+- if (!rc || (*server && *ses))
++
++ rc = dfs_cache_get_tgt_referral(path, tgt_it, &ref);
++ if (rc)
++ break;
++
++ cifs_dbg(FYI, "%s: old ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
++ tmp_ctx.prepath);
++
++ mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, &ref,
++ &fake_devname);
++ free_dfs_info_param(&ref);
++
++ if (IS_ERR(mdata)) {
++ rc = PTR_ERR(mdata);
++ mdata = NULL;
++ } else
++ rc = cifs_setup_volume_info(&tmp_ctx, mdata, fake_devname);
++
++ kfree(mdata);
++ kfree(fake_devname);
++
++ if (rc)
++ break;
++
++ cifs_dbg(FYI, "%s: new ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
++ tmp_ctx.prepath);
++
++ mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
++ rc = mount_get_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
++ if (!rc || (*server && *ses)) {
++ /*
++ * We were able to connect to new target server. Update current context with
++ * new target server.
++ */
++ rc = update_vol_info(tgt_it, &tmp_ctx, ctx);
+ break;
++ }
+ }
+ if (!rc) {
++ cifs_dbg(FYI, "%s: final ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
++ tmp_ctx.prepath);
+ /*
+- * Update DFS target hint in DFS referral cache with the target
+- * server we successfully reconnected to.
++ * Update DFS target hint in DFS referral cache with the target server we
++ * successfully reconnected to.
+ */
+- rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses,
+- cifs_sb->local_nls,
+- cifs_remap(cifs_sb), path,
+- tgt_it);
++ rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses, cifs_sb->local_nls,
++ cifs_remap(cifs_sb), path, tgt_it);
+ }
++
++out:
++ smb3_cleanup_fs_context_contents(&tmp_ctx);
+ dfs_cache_free_tgts(&tgt_list);
+ return rc;
+ }
+@@ -3285,77 +3280,77 @@ static void put_root_ses(struct cifs_ses *ses)
+ cifs_put_smb_ses(ses);
+ }
+
+-/* Check if a path component is remote and then update @dfs_path accordingly */
+-static int check_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
+- const unsigned int xid, struct TCP_Server_Info *server,
+- struct cifs_tcon *tcon, char **dfs_path)
++/* Set up next dfs prefix path in @dfs_path */
++static int next_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
++ const unsigned int xid, struct TCP_Server_Info *server,
++ struct cifs_tcon *tcon, char **dfs_path)
+ {
+- char *path, *s;
+- char sep = CIFS_DIR_SEP(cifs_sb), tmp;
+- char *npath;
+- int rc = 0;
+- int added_treename = tcon->Flags & SMB_SHARE_IS_IN_DFS;
+- int skip = added_treename;
++ char *path, *npath;
++ int added_treename = is_tcon_dfs(tcon);
++ int rc;
+
+ path = cifs_build_path_to_root(ctx, cifs_sb, tcon, added_treename);
+ if (!path)
+ return -ENOMEM;
+
+- /*
+- * Walk through the path components in @path and check if they're accessible. In case any of
+- * the components is -EREMOTE, then update @dfs_path with the next DFS referral request path
+- * (NOT including the remaining components).
+- */
+- s = path;
+- do {
+- /* skip separators */
+- while (*s && *s == sep)
+- s++;
+- if (!*s)
+- break;
+- /* next separator */
+- while (*s && *s != sep)
+- s++;
+- /*
+- * if the treename is added, we then have to skip the first
+- * part within the separators
+- */
+- if (skip) {
+- skip = 0;
+- continue;
++ rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
++ if (rc == -EREMOTE) {
++ struct smb3_fs_context v = {NULL};
++ /* if @path contains a tree name, skip it in the prefix path */
++ if (added_treename) {
++ rc = smb3_parse_devname(path, &v);
++ if (rc)
++ goto out;
++ npath = build_unc_path_to_root(&v, cifs_sb, true);
++ smb3_cleanup_fs_context_contents(&v);
++ } else {
++ v.UNC = ctx->UNC;
++ v.prepath = path + 1;
++ npath = build_unc_path_to_root(&v, cifs_sb, true);
+ }
+- tmp = *s;
+- *s = 0;
+- rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, path);
+- if (rc && rc == -EREMOTE) {
+- struct smb3_fs_context v = {NULL};
+- /* if @path contains a tree name, skip it in the prefix path */
+- if (added_treename) {
+- rc = smb3_parse_devname(path, &v);
+- if (rc)
+- break;
+- rc = -EREMOTE;
+- npath = build_unc_path_to_root(&v, cifs_sb, true);
+- smb3_cleanup_fs_context_contents(&v);
+- } else {
+- v.UNC = ctx->UNC;
+- v.prepath = path + 1;
+- npath = build_unc_path_to_root(&v, cifs_sb, true);
+- }
+- if (IS_ERR(npath)) {
+- rc = PTR_ERR(npath);
+- break;
+- }
+- kfree(*dfs_path);
+- *dfs_path = npath;
++
++ if (IS_ERR(npath)) {
++ rc = PTR_ERR(npath);
++ goto out;
+ }
+- *s = tmp;
+- } while (rc == 0);
+
++ kfree(*dfs_path);
++ *dfs_path = npath;
++ rc = -EREMOTE;
++ }
++
++out:
+ kfree(path);
+ return rc;
+ }
+
++/* Check if resolved targets can handle any DFS referrals */
++static int is_referral_server(const char *ref_path, struct cifs_tcon *tcon, bool *ref_server)
++{
++ int rc;
++ struct dfs_info3_param ref = {0};
++
++ if (is_tcon_dfs(tcon)) {
++ *ref_server = true;
++ } else {
++ cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
++
++ rc = dfs_cache_noreq_find(ref_path, &ref, NULL);
++ if (rc) {
++ cifs_dbg(VFS, "%s: dfs_cache_noreq_find: failed (rc=%d)\n", __func__, rc);
++ return rc;
++ }
++ cifs_dbg(FYI, "%s: ref.flags=0x%x\n", __func__, ref.flags);
++ /*
++ * Check if all targets are capable of handling DFS referrals as per
++ * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
++ */
++ *ref_server = !!(ref.flags & DFSREF_REFERRAL_SERVER);
++ free_dfs_info_param(&ref);
++ }
++ return 0;
++}
++
+ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ {
+ int rc = 0;
+@@ -3367,18 +3362,19 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ char *ref_path = NULL, *full_path = NULL;
+ char *oldmnt = NULL;
+ char *mntdata = NULL;
++ bool ref_server = false;
+
+ rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ /*
+- * Unconditionally try to get an DFS referral (even cached) to determine whether it is an
+- * DFS mount.
++ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
++ * try to get a DFS referral (even cached) to determine whether it is a DFS mount.
+ *
+ * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
+ * to respond with PATH_NOT_COVERED to requests that include the prefix.
+ */
+- if (dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
++ dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
+ NULL)) {
+- /* No DFS referral was returned. Looks like a regular share. */
+ if (rc)
+ goto error;
+ /* Check if it is fully accessible and then mount it */
+@@ -3432,13 +3428,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ break;
+ if (!tcon)
+ continue;
++
+ /* Make sure that requests go through new root servers */
+- if (is_tcon_dfs(tcon)) {
++ rc = is_referral_server(ref_path + 1, tcon, &ref_server);
++ if (rc)
++ break;
++ if (ref_server) {
+ put_root_ses(root_ses);
+ set_root_ses(cifs_sb, ses, &root_ses);
+ }
+- /* Check for remaining path components and then continue chasing them (-EREMOTE) */
+- rc = check_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
++
++ /* Get next dfs path and then continue chasing them if -EREMOTE */
++ rc = next_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
+ /* Prevent recursion on broken link referrals */
+ if (rc == -EREMOTE && ++count > MAX_NESTED_LINKS)
+ rc = -ELOOP;
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 4950ab0486aee..098b4bc8da59a 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -37,11 +37,12 @@ struct cache_dfs_tgt {
+ struct cache_entry {
+ struct hlist_node hlist;
+ const char *path;
+- int ttl;
+- int srvtype;
+- int flags;
++ int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
++ int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
++ int srvtype; /* DFS_REREFERRAL_V3.ServerType */
++ int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
+ struct timespec64 etime;
+- int path_consumed;
++ int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
+ int numtgts;
+ struct list_head tlist;
+ struct cache_dfs_tgt *tgthint;
+@@ -166,14 +167,11 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
+ continue;
+
+ seq_printf(m,
+- "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
+- "interlink=%s,path_consumed=%d,expired=%s\n",
+- ce->path,
+- ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+- ce->ttl, ce->etime.tv_nsec,
+- IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
+- ce->path_consumed,
+- cache_entry_expired(ce) ? "yes" : "no");
++ "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
++ ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
++ ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
++ IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
++ ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
+
+ list_for_each_entry(t, &ce->tlist, list) {
+ seq_printf(m, " %s%s\n",
+@@ -236,11 +234,12 @@ static inline void dump_tgts(const struct cache_entry *ce)
+
+ static inline void dump_ce(const struct cache_entry *ce)
+ {
+- cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
++ cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path,
+ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+ ce->etime.tv_nsec,
+- IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
++ ce->hdr_flags, ce->ref_flags,
++ IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
+ ce->path_consumed,
+ cache_entry_expired(ce) ? "yes" : "no");
+ dump_tgts(ce);
+@@ -381,7 +380,8 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
+ ce->ttl = refs[0].ttl;
+ ce->etime = get_expire_time(ce->ttl);
+ ce->srvtype = refs[0].server_type;
+- ce->flags = refs[0].ref_flag;
++ ce->hdr_flags = refs[0].flags;
++ ce->ref_flags = refs[0].ref_flag;
+ ce->path_consumed = refs[0].path_consumed;
+
+ for (i = 0; i < numrefs; i++) {
+@@ -799,7 +799,8 @@ static int setup_referral(const char *path, struct cache_entry *ce,
+ ref->path_consumed = ce->path_consumed;
+ ref->ttl = ce->ttl;
+ ref->server_type = ce->srvtype;
+- ref->ref_flag = ce->flags;
++ ref->ref_flag = ce->ref_flags;
++ ref->flags = ce->hdr_flags;
+
+ return 0;
+
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 12a5da0230b52..798c32cab146f 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -542,20 +542,37 @@ static int smb3_fs_context_parse_monolithic(struct fs_context *fc,
+
+ /* BB Need to add support for sep= here TBD */
+ while ((key = strsep(&options, ",")) != NULL) {
+- if (*key) {
+- size_t v_len = 0;
+- char *value = strchr(key, '=');
+-
+- if (value) {
+- if (value == key)
+- continue;
+- *value++ = 0;
+- v_len = strlen(value);
+- }
+- ret = vfs_parse_fs_string(fc, key, value, v_len);
+- if (ret < 0)
+- break;
++ size_t len;
++ char *value;
++
++ if (*key == 0)
++ break;
++
++ /* Check if the following character is the delimiter. If yes,
++ * we have encountered a double delimiter; reset the NULL
++ * character to the delimiter.
++ */
++ while (options && options[0] == ',') {
++ len = strlen(key);
++ strcpy(key + len, options);
++ options = strchr(options, ',');
++ if (options)
++ *options++ = 0;
+ }
++
++ len = 0;
++ value = strchr(key, '=');
++ if (value) {
++ if (value == key)
++ continue;
++ *value++ = 0;
++ len = strlen(value);
++ }
++
++ ret = vfs_parse_fs_string(fc, key, value, len);
++ if (ret < 0)
++ break;
+ }
+
+ return ret;
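The rewritten loop treats a doubled comma as an escaped literal comma inside the previous option, gluing the two halves back together before the key=value split. A standalone model of that behaviour; memmove() replaces the kernel's overlapping strcpy(), and the option string is invented:

#define _DEFAULT_SOURCE		/* for strsep() */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "user=alice,pass=a,,b,vers=3.0";
	char *options = buf, *key;

	while ((key = strsep(&options, ",")) != NULL) {
		if (*key == 0)
			break;
		/* a ",," left an empty next token: the first comma was an
		 * escape, so glue the halves back together */
		while (options && options[0] == ',') {
			size_t len = strlen(key);

			memmove(key + len, options, strlen(options) + 1);
			options = strchr(key + len + 1, ',');
			if (options)
				*options++ = 0;
		}
		printf("option: %s\n", key);	/* pass=a,b survives intact */
	}
	return 0;
}

The kernel version strcpy()s across a one-byte overlap, which its own strcpy implementation happens to tolerate; memmove is the portable spelling of the same move.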
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 2fcf66473436b..86c7f04896207 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -297,7 +297,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
+ {
+ struct dentry *dentry;
+
+- if (IS_ERR(parent))
++ if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent))
+ return NULL;
+
+ if (!parent)
+@@ -318,6 +318,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
+ if (!(debugfs_allow & DEBUGFS_ALLOW_API))
+ return ERR_PTR(-EPERM);
+
++ if (!debugfs_initialized())
++ return ERR_PTR(-ENOENT);
++
+ pr_debug("creating file '%s'\n", name);
+
+ if (IS_ERR(parent))
+diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
+index 5bde77d708524..47314a26767a8 100644
+--- a/fs/erofs/xattr.c
++++ b/fs/erofs/xattr.c
+@@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
+ int ret = 0;
+
+ /* the most case is that xattrs of this inode are initialized. */
+- if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
++ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
++ /*
++ * paired with smp_mb() at the end of the function to ensure
++ * fields will only be observed after the bit is set.
++ */
++ smp_mb();
+ return 0;
++ }
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+@@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode)
+ }
+ xattr_iter_end(&it, atomic_map);
+
++ /* paired with smp_mb() at the beginning of the function. */
++ smp_mb();
+ set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
+
+ out_unlock:
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index ae325541884e3..14d2de35110cc 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -36,8 +36,14 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+ void *kaddr;
+ struct z_erofs_map_header *h;
+
+- if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
++ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
++ /*
++ * paired with smp_mb() at the end of the function to ensure
++ * fields will only be observed after the bit is set.
++ */
++ smp_mb();
+ return 0;
++ }
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+@@ -83,6 +89,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
+
+ vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
+ ((h->h_clusterbits >> 5) & 7);
++ /* paired with smp_mb() at the beginning of the function */
++ smp_mb();
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+ unmap_done:
+ kunmap_atomic(kaddr);
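Both erofs hunks apply the same publish/subscribe barrier pairing: the initialiser writes the fields, issues smp_mb(), then sets the INITED bit; the lock-free fast path that observes the bit issues the paired smp_mb() before touching the fields. A portable C11 rendering of the idea, with seq_cst fences standing in for smp_mb() and an invented field name:

#include <stdatomic.h>
#include <stdio.h>

struct inode_info {
	int xattr_count;	/* invented: set up once, read often */
	atomic_int inited;
};

void init_slow_path(struct inode_info *vi)
{
	vi->xattr_count = 42;				/* publish fields */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	atomic_store_explicit(&vi->inited, 1, memory_order_relaxed);
}

int read_fast_path(struct inode_info *vi)
{
	if (atomic_load_explicit(&vi->inited, memory_order_relaxed)) {
		atomic_thread_fence(memory_order_seq_cst); /* paired smp_mb() */
		return vi->xattr_count;	/* fields guaranteed visible */
	}
	return -1;	/* caller must take the locked slow path */
}

int main(void)
{
	struct inode_info vi = { 0 };

	init_slow_path(&vi);
	printf("count=%d\n", read_fast_path(&vi));	/* count=42 */
	return 0;
}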
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index a829af074eb58..3196474cbe24c 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -979,7 +979,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
+ return epir;
+ }
+
+-#ifdef CONFIG_CHECKPOINT_RESTORE
++#ifdef CONFIG_KCMP
+ static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
+ {
+ struct rb_node *rbp;
+@@ -1021,7 +1021,7 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
+
+ return file_raw;
+ }
+-#endif /* CONFIG_CHECKPOINT_RESTORE */
++#endif /* CONFIG_KCMP */
+
+ /**
+ * Adds a new entry to the tail of the list in a lockless way, i.e.
+diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
+index 6aec6288e1f21..7f39b1c6469c4 100644
+--- a/fs/exfat/exfat_raw.h
++++ b/fs/exfat/exfat_raw.h
+@@ -77,6 +77,10 @@
+
+ #define EXFAT_FILE_NAME_LEN 15
+
++#define EXFAT_MIN_SECT_SIZE_BITS 9
++#define EXFAT_MAX_SECT_SIZE_BITS 12
++#define EXFAT_MAX_SECT_PER_CLUS_BITS(x) (25 - (x)->sect_size_bits)
++
+ /* EXFAT: Main and Backup Boot Sector (512 bytes) */
+ struct boot_sector {
+ __u8 jmp_boot[BOOTSEC_JUMP_BOOT_LEN];
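The constant 25 in EXFAT_MAX_SECT_PER_CLUS_BITS follows from the exFAT format's 32 MiB cluster-size ceiling: cluster bytes = 2^sect_size_bits * 2^sect_per_clus_bits <= 2^25, so the two exponents may sum to at most 25. A quick check of the bounds, assuming that 32 MiB cap (the macro here takes the bit count directly instead of the kernel's boot-sector pointer):

#include <stdio.h>

#define EXFAT_MIN_SECT_SIZE_BITS	9
#define EXFAT_MAX_SECT_SIZE_BITS	12
#define EXFAT_MAX_SECT_PER_CLUS_BITS(bits)	(25 - (bits))

int main(void)
{
	int s;

	for (s = EXFAT_MIN_SECT_SIZE_BITS; s <= EXFAT_MAX_SECT_SIZE_BITS; s++)
		printf("sector %5d B -> cluster cap %llu B\n", 1 << s,
		       1ULL << (s + EXFAT_MAX_SECT_PER_CLUS_BITS(s)));
	/* every row prints 33554432 B, i.e. the 32 MiB format limit */
	return 0;
}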
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index 87be5bfc31eb4..c6d8d2e534865 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -381,8 +381,7 @@ static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect)
+ {
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+- if (!is_power_of_2(logical_sect) ||
+- logical_sect < 512 || logical_sect > 4096) {
++ if (!is_power_of_2(logical_sect)) {
+ exfat_err(sb, "bogus logical sector size %u", logical_sect);
+ return -EIO;
+ }
+@@ -451,6 +450,25 @@ static int exfat_read_boot_sector(struct super_block *sb)
+ return -EINVAL;
+ }
+
++ /*
++ * sect_size_bits could be at least 9 and at most 12.
++ */
++ if (p_boot->sect_size_bits < EXFAT_MIN_SECT_SIZE_BITS ||
++ p_boot->sect_size_bits > EXFAT_MAX_SECT_SIZE_BITS) {
++ exfat_err(sb, "bogus sector size bits : %u\n",
++ p_boot->sect_size_bits);
++ return -EINVAL;
++ }
++
++ /*
++ * sect_per_clus_bits could be at least 0 and at most 25 - sect_size_bits.
++ */
++ if (p_boot->sect_per_clus_bits > EXFAT_MAX_SECT_PER_CLUS_BITS(p_boot)) {
++ exfat_err(sb, "bogus sectors bits per cluster : %u\n",
++ p_boot->sect_per_clus_bits);
++ return -EINVAL;
++ }
++
+ sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits;
+ sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits;
+ sbi->cluster_size_bits = p_boot->sect_per_clus_bits +
+@@ -477,16 +495,19 @@ static int exfat_read_boot_sector(struct super_block *sb)
+ sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
+
+ /* check consistencies */
+- if (sbi->num_FAT_sectors << p_boot->sect_size_bits <
+- sbi->num_clusters * 4) {
++ if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits <
++ (u64)sbi->num_clusters * 4) {
+ exfat_err(sb, "bogus fat length");
+ return -EINVAL;
+ }
++
+ if (sbi->data_start_sector <
+- sbi->FAT1_start_sector + sbi->num_FAT_sectors * p_boot->num_fats) {
++ (u64)sbi->FAT1_start_sector +
++ (u64)sbi->num_FAT_sectors * p_boot->num_fats) {
+ exfat_err(sb, "bogus data start sector");
+ return -EINVAL;
+ }
++
+ if (sbi->vol_flags & VOLUME_DIRTY)
+ exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ if (sbi->vol_flags & MEDIA_FAILURE)
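The (u64) casts in the consistency checks above matter because both sides multiply 32-bit on-disk quantities: on a crafted image, num_FAT_sectors << sect_size_bits can wrap in 32-bit arithmetic and slip past the check. A two-line demonstration of the wrap, with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_fat_sectors = 1u << 21;	/* invented: 2^21 sectors */
	int sect_size_bits = 12;		/* 4096-byte sectors */

	/* 2^21 << 12 = 2^33: truncated to 0 in 32-bit arithmetic */
	printf("32-bit: %u\n", num_fat_sectors << sect_size_bits);
	/* widened first, as the fixed check does: 8589934592 */
	printf("64-bit: %llu\n",
	       (unsigned long long)num_fat_sectors << sect_size_bits);
	return 0;
}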
+diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
+index 619dd35ddd48a..86699c8cab281 100644
+--- a/fs/ext4/Kconfig
++++ b/fs/ext4/Kconfig
+@@ -103,8 +103,7 @@ config EXT4_DEBUG
+
+ config EXT4_KUNIT_TESTS
+ tristate "KUnit tests for ext4" if !KUNIT_ALL_TESTS
+- select EXT4_FS
+- depends on KUNIT
++ depends on EXT4_FS && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the ext4 KUnit tests.
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index cf652ba3e74d2..df0368d578b16 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2401,11 +2401,10 @@ again:
+ (frame - 1)->bh);
+ if (err)
+ goto journal_error;
+- if (restart) {
+- err = ext4_handle_dirty_dx_node(handle, dir,
+- frame->bh);
++ err = ext4_handle_dirty_dx_node(handle, dir,
++ frame->bh);
++ if (err)
+ goto journal_error;
+- }
+ } else {
+ struct dx_root *dxroot;
+ memcpy((char *) entries2, (char *) entries,
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 4bcbacfe33259..7a774c9e4cb89 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1415,7 +1415,7 @@ retry_write:
+
+ ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+ NULL, NULL, wbc, io_type,
+- compr_blocks);
++ compr_blocks, false);
+ if (ret) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(cc->rpages[i]);
+@@ -1450,6 +1450,9 @@ retry_write:
+
+ *submitted += _submitted;
+ }
++
++ f2fs_balance_fs(F2FS_M_SB(mapping), true);
++
+ return 0;
+ out_err:
+ for (++i; i < cc->cluster_size; i++) {
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index aa34d620bec98..4d3ebf094f6d7 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -499,7 +499,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
+ if (f2fs_lfs_mode(sbi) && current->plug)
+ blk_finish_plug(current->plug);
+
+- if (F2FS_IO_ALIGNED(sbi))
++ if (!F2FS_IO_ALIGNED(sbi))
+ goto submit_io;
+
+ start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
+@@ -2743,7 +2743,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ sector_t *last_block,
+ struct writeback_control *wbc,
+ enum iostat_type io_type,
+- int compr_blocks)
++ int compr_blocks,
++ bool allow_balance)
+ {
+ struct inode *inode = page->mapping->host;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+@@ -2881,7 +2882,7 @@ out:
+ }
+ unlock_page(page);
+ if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
+- !F2FS_I(inode)->cp_task)
++ !F2FS_I(inode)->cp_task && allow_balance)
+ f2fs_balance_fs(sbi, need_balance_fs);
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+@@ -2928,7 +2929,7 @@ out:
+ #endif
+
+ return f2fs_write_single_data_page(page, NULL, NULL, NULL,
+- wbc, FS_DATA_IO, 0);
++ wbc, FS_DATA_IO, 0, true);
+ }
+
+ /*
+@@ -3096,7 +3097,8 @@ continue_unlock:
+ }
+ #endif
+ ret = f2fs_write_single_data_page(page, &submitted,
+- &bio, &last_block, wbc, io_type, 0);
++ &bio, &last_block, wbc, io_type,
++ 0, true);
+ if (ret == AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
+ #ifdef CONFIG_F2FS_FS_COMPRESSION
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index bb11759191dcc..1578402c58444 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3469,7 +3469,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
+ struct bio **bio, sector_t *last_block,
+ struct writeback_control *wbc,
+ enum iostat_type io_type,
+- int compr_blocks);
++ int compr_blocks, bool allow_balance);
+ void f2fs_invalidate_page(struct page *page, unsigned int offset,
+ unsigned int length);
+ int f2fs_release_page(struct page *page, gfp_t wait);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index f585545277d77..d5ebc67c7130b 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -60,6 +60,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
+ bool need_alloc = true;
+ int err = 0;
+
++ if (unlikely(IS_IMMUTABLE(inode)))
++ return VM_FAULT_SIGBUS;
++
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
+ goto err;
+@@ -767,6 +770,10 @@ int f2fs_truncate(struct inode *inode)
+ return -EIO;
+ }
+
++ err = dquot_initialize(inode);
++ if (err)
++ return err;
++
+ /* we should check inline_data size */
+ if (!f2fs_may_inline_data(inode)) {
+ err = f2fs_convert_inline_inode(inode);
+@@ -848,7 +855,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+
+- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
++ if (!in_group_p(inode->i_gid) &&
++ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ mode &= ~S_ISGID;
+ set_acl_inode(inode, mode);
+ }
+@@ -865,6 +873,14 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ return -EIO;
+
++ if (unlikely(IS_IMMUTABLE(inode)))
++ return -EPERM;
++
++ if (unlikely(IS_APPEND(inode) &&
++ (attr->ia_valid & (ATTR_MODE | ATTR_UID |
++ ATTR_GID | ATTR_TIMES_SET))))
++ return -EPERM;
++
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ !f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+@@ -4043,8 +4059,10 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
+
+ for (i = 0; i < page_len; i++, redirty_idx++) {
+ page = find_lock_page(mapping, redirty_idx);
+- if (!page)
+- ret = -ENOENT;
++ if (!page) {
++ ret = -ENOMEM;
++ break;
++ }
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+ f2fs_put_page(page, 0);
+@@ -4349,6 +4367,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ inode_lock(inode);
+ }
+
++ if (unlikely(IS_IMMUTABLE(inode))) {
++ ret = -EPERM;
++ goto unlock;
++ }
++
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0) {
+ bool preallocated = false;
+@@ -4413,6 +4436,7 @@ write:
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
+ }
++unlock:
+ inode_unlock(inode);
+ out:
+ trace_f2fs_file_write_iter(inode, iocb->ki_pos,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 806ebabf58706..993caefcd2bb0 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -192,6 +192,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
+ f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
+ return 0;
+
++ err = dquot_initialize(inode);
++ if (err)
++ return err;
++
+ page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
+ if (!page)
+ return -ENOMEM;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index b4a07fe62d1a5..972736d71fa4d 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1796,6 +1796,9 @@ restore_flag:
+
+ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+ {
++ /* we should flush all the data to keep data consistency */
++ sync_inodes_sb(sbi->sb);
++
+ down_write(&sbi->gc_lock);
+ f2fs_dirty_to_prefree(sbi);
+
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 62d9081d1e26e..a1f9dde33058f 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1230,6 +1230,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+
+ gfs2_inplace_release(ip);
+
++ if (ip->i_qadata && ip->i_qadata->qa_qd_num)
++ gfs2_quota_unlock(ip);
++
+ if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+ /* Deallocate blocks that were just allocated. */
+ loff_t blockmask = i_blocksize(inode) - 1;
+@@ -1242,9 +1245,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ }
+ }
+
+- if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+- gfs2_quota_unlock(ip);
+-
+ if (unlikely(!written))
+ goto out_unlock;
+
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index 9f2b5609f225d..153272f82984b 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+- int lvb_needs_unlock = 0;
+ int error;
+
+ if (gl->gl_lksb.sb_lkid == 0) {
+@@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
+ gfs2_update_request_times(gl);
+
+- /* don't want to skip dlm_unlock writing the lvb when lock is ex */
+-
+- if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
+- lvb_needs_unlock = 1;
++ /* don't want to skip dlm_unlock writing the lvb when lock has one */
+
+ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+- !lvb_needs_unlock) {
++ !gl->gl_lksb.sb_lvbptr) {
+ gfs2_glock_free(gl);
+ return;
+ }
+diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
+index c26c68ebd29d4..a3c1911862f01 100644
+--- a/fs/gfs2/recovery.c
++++ b/fs/gfs2/recovery.c
+@@ -514,8 +514,10 @@ void gfs2_recover_func(struct work_struct *work)
+ error = foreach_descriptor(jd, head.lh_tail,
+ head.lh_blkno, pass);
+ lops_after_scan(jd, error, pass);
+- if (error)
++ if (error) {
++ up_read(&sdp->sd_log_flush_lock);
+ goto fail_gunlock_thaw;
++ }
+ }
+
+ recover_local_statfs(jd, &head);
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index a374397f42730..574bea29f21ee 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -93,9 +93,10 @@ out_unlock:
+
+ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ {
+- struct gfs2_glock *gl = sdp->sd_live_gh.gh_gl;
++ struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
+ struct inode *inode = sdp->sd_jdesc->jd_inode;
+ struct gfs2_inode *ip = GFS2_I(inode);
++ struct gfs2_glock *i_gl = ip->i_gl;
+ u64 no_formal_ino = ip->i_no_formal_ino;
+ int ret = 0;
+ int tries;
+@@ -141,7 +142,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ thaw_super(sdp->sd_vfs);
+ } else {
+- wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
++ wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
++ TASK_UNINTERRUPTIBLE);
+ }
+
+ /*
+@@ -161,15 +163,15 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ * on other nodes to be successful, otherwise we remain the owner of
+ * the glock as far as dlm is concerned.
+ */
+- if (gl->gl_ops->go_free) {
+- set_bit(GLF_FREEING, &gl->gl_flags);
+- wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
++ if (i_gl->gl_ops->go_free) {
++ set_bit(GLF_FREEING, &i_gl->gl_flags);
++ wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
+ }
+
+ /*
+ * Dequeue the "live" glock, but keep a reference so it's never freed.
+ */
+- gfs2_glock_hold(gl);
++ gfs2_glock_hold(live_gl);
+ gfs2_glock_dq_wait(&sdp->sd_live_gh);
+ /*
+ * We enqueue the "live" glock in EX so that all other nodes
+@@ -208,7 +210,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ gfs2_glock_nq(&sdp->sd_live_gh);
+ }
+
+- gfs2_glock_queue_put(gl); /* drop the extra reference we acquired */
++ gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
+ clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+
+ /*
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 931671082e615..4d0ede0418571 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -8723,8 +8723,21 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ smp_rmb();
+ if (!io_sqring_full(ctx))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+- io_cqring_overflow_flush(ctx, false, NULL, NULL);
+- if (io_cqring_events(ctx))
++
++ /*
++ * Don't flush cqring overflow list here, just do a simple check.
++ * Otherwise there could possibly be an ABBA deadlock:
++ * CPU0 CPU1
++ * ---- ----
++ * lock(&ctx->uring_lock);
++ * lock(&ep->mtx);
++ * lock(&ctx->uring_lock);
++ * lock(&ep->mtx);
++ *
++ * Users may get EPOLLIN meanwhile but see nothing in the cqring; this
++ * pushes them to do the flush.
++ */
++ if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
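The deadlock diagram in the new comment is the standard ABBA shape: two locks taken in opposite orders on two CPUs. Reduced to plain pthreads it looks like the sketch below; run sequentially it is harmless, but two threads running these paths concurrently could each hold one mutex while waiting forever on the other, which is exactly why the poll callback now avoids taking uring_lock at all. All names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;	/* "A" */
static pthread_mutex_t ep_mtx = PTHREAD_MUTEX_INITIALIZER;	/* "B" */

static void *epoll_ctl_side(void *arg)	/* lock(A); lock(B) */
{
	pthread_mutex_lock(&uring_lock);
	pthread_mutex_lock(&ep_mtx);
	pthread_mutex_unlock(&ep_mtx);
	pthread_mutex_unlock(&uring_lock);
	return arg;
}

static void *poll_side(void *arg)	/* lock(B); lock(A): reversed */
{
	pthread_mutex_lock(&ep_mtx);
	pthread_mutex_lock(&uring_lock);
	pthread_mutex_unlock(&uring_lock);
	pthread_mutex_unlock(&ep_mtx);
	return arg;
}

int main(void)
{
	/* serialized here so the demo terminates */
	epoll_ctl_side(NULL);
	poll_side(NULL);
	puts("no deadlock when serialized");
	return 0;
}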
+diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
+index f0fe641893a5e..b9e6a7ec78be4 100644
+--- a/fs/isofs/dir.c
++++ b/fs/isofs/dir.c
+@@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
+ printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ " in block %lu of inode %lu\n", block,
+ inode->i_ino);
++ brelse(bh);
+ return -EIO;
+ }
+
+diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
+index 402769881c32b..58f80e1b3ac0d 100644
+--- a/fs/isofs/namei.c
++++ b/fs/isofs/namei.c
+@@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
+ printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ " in block %lu of inode %lu\n", block,
+ dir->i_ino);
++ brelse(bh);
+ return 0;
+ }
+
+diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
+index be7c8a6a57480..4fe64519870f1 100644
+--- a/fs/jffs2/summary.c
++++ b/fs/jffs2/summary.c
+@@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
+ dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
+ je16_to_cpu(temp->u.nodetype));
+ jffs2_sum_disable_collecting(c->summary);
++ /* The above call removes the list, nothing more to do */
++ goto bail_rwcompat;
+ } else {
+ BUG(); /* unknown node in summary information */
+ }
+@@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
+
+ c->summary->sum_num--;
+ }
++ bail_rwcompat:
+
+ jffs2_sum_reset_collected(c->summary);
+
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 94b7c1cb5ceb3..7aee156086198 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1656,7 +1656,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
+ } else if (rc == -ENOSPC) {
+ /* search for next smaller log2 block */
+ l2nb = BLKSTOL2(nblocks) - 1;
+- nblocks = 1 << l2nb;
++ nblocks = 1LL << l2nb;
+ } else {
+ /* Trim any already allocated blocks */
+ jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
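The 1LL fix above addresses a shift performed in 32-bit int: for l2nb >= 31 the expression 1 << l2nb overflows (undefined behaviour) before the result is ever widened to the 64-bit nblocks. A minimal demonstration of the corrected form; the broken form is left as a comment precisely because evaluating it would be undefined:

#include <stdio.h>

int main(void)
{
	int l2nb = 33;
	/* "1 << l2nb" would be evaluated in 32-bit int, undefined for
	 * shifts of 31 or more; widening afterwards cannot repair it.
	 * The fixed form shifts a 64-bit constant from the start: */
	long long nblocks = 1LL << l2nb;

	printf("nblocks=%lld\n", nblocks);	/* 8589934592 */
	return 0;
}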
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2f4679a62712a..fc8bbfd9beb36 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5438,15 +5438,16 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
+
+ if (cache_validity & NFS_INO_INVALID_ATIME)
+ bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
+- if (cache_validity & NFS_INO_INVALID_ACCESS)
+- bitmask[0] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
+- FATTR4_WORD1_OWNER_GROUP;
+- if (cache_validity & NFS_INO_INVALID_ACL)
+- bitmask[0] |= FATTR4_WORD0_ACL;
+- if (cache_validity & NFS_INO_INVALID_LABEL)
++ if (cache_validity & NFS_INO_INVALID_OTHER)
++ bitmask[1] |= FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER |
++ FATTR4_WORD1_OWNER_GROUP |
++ FATTR4_WORD1_NUMLINKS;
++ if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
+ bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
+- if (cache_validity & NFS_INO_INVALID_CTIME)
++ if (cache_validity & NFS_INO_INVALID_CHANGE)
+ bitmask[0] |= FATTR4_WORD0_CHANGE;
++ if (cache_validity & NFS_INO_INVALID_CTIME)
++ bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
+ if (cache_validity & NFS_INO_INVALID_MTIME)
+ bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
+ if (cache_validity & NFS_INO_INVALID_SIZE)
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index f6d5d783f4a45..0759e589ab52b 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1522,12 +1522,9 @@ static int __init init_nfsd(void)
+ int retval;
+ printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+
+- retval = register_pernet_subsys(&nfsd_net_ops);
+- if (retval < 0)
+- return retval;
+ retval = register_cld_notifier();
+ if (retval)
+- goto out_unregister_pernet;
++ return retval;
+ retval = nfsd4_init_slabs();
+ if (retval)
+ goto out_unregister_notifier;
+@@ -1544,9 +1541,14 @@ static int __init init_nfsd(void)
+ goto out_free_lockd;
+ retval = register_filesystem(&nfsd_fs_type);
+ if (retval)
++ goto out_free_exports;
++ retval = register_pernet_subsys(&nfsd_net_ops);
++ if (retval < 0)
+ goto out_free_all;
+ return 0;
+ out_free_all:
++ unregister_pernet_subsys(&nfsd_net_ops);
++out_free_exports:
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+ out_free_lockd:
+@@ -1559,13 +1561,12 @@ out_free_slabs:
+ nfsd4_free_slabs();
+ out_unregister_notifier:
+ unregister_cld_notifier();
+-out_unregister_pernet:
+- unregister_pernet_subsys(&nfsd_net_ops);
+ return retval;
+ }
+
+ static void __exit exit_nfsd(void)
+ {
++ unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_drc_slab_free();
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+@@ -1575,7 +1576,6 @@ static void __exit exit_nfsd(void)
+ nfsd4_exit_pnfs();
+ unregister_filesystem(&nfsd_fs_type);
+ unregister_cld_notifier();
+- unregister_pernet_subsys(&nfsd_net_ops);
+ }
+
+ MODULE_AUTHOR("Olaf Kirch ");
+diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
+index 0179a73a3fa2c..12a7590601ddb 100644
+--- a/fs/ocfs2/cluster/heartbeat.c
++++ b/fs/ocfs2/cluster/heartbeat.c
+@@ -2042,7 +2042,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
+ o2hb_nego_timeout_handler,
+ reg, NULL, &reg->hr_handler_list);
+ if (ret)
+- goto free;
++ goto remove_item;
+
+ ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
+ sizeof(struct o2hb_nego_msg),
+@@ -2057,6 +2057,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
+
+ unregister_handler:
+ o2net_unregister_handler_list(&reg->hr_handler_list);
++remove_item:
++ spin_lock(&o2hb_live_lock);
++ list_del(&reg->hr_all_item);
++ if (o2hb_global_heartbeat_active())
++ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
++ spin_unlock(&o2hb_live_lock);
+ free:
+ kfree(reg);
+ return ERR_PTR(ret);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index d2018f70d1fae..070d2df8ab9cf 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -571,7 +571,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
+ error = -ENOMEM;
+ if (count >= KMALLOC_MAX_SIZE)
+ goto out;
+- kbuf = kzalloc(count + 1, GFP_KERNEL);
++ kbuf = kvzalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ goto out;
+
+@@ -600,7 +600,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
+
+ error = count;
+ out_free_buf:
+- kfree(kbuf);
++ kvfree(kbuf);
+ out:
+ sysctl_head_finish(head);
+
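The kzalloc-to-kvzalloc switch matters because proc_sys_call_handler sizes the buffer from a user-controlled count: kmalloc needs physically contiguous pages and can fail well below KMALLOC_MAX_SIZE under fragmentation, while kvmalloc falls back to vmalloc, and kvfree() frees either kind. A userspace caricature of that fallback policy, with the threshold and all names invented:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define FAKE_KMALLOC_LIMIT (4u << 20)	/* invented slab ceiling: 4 MiB */

/* caricature of kvzalloc(): try the "slab" path for small sizes, fall
 * back to a "vmalloc"-like path otherwise (the real one is in mm/util.c) */
static void *kvzalloc_demo(size_t size)
{
	void *p = NULL;

	if (size <= FAKE_KMALLOC_LIMIT)
		p = calloc(1, size);	/* kzalloc: physically contiguous */
	if (!p)
		p = calloc(1, size);	/* vzalloc: only virtually contiguous */
	return p;
}

static void kvfree_demo(void *p)
{
	free(p);	/* kvfree() likewise accepts either origin */
}

int main(void)
{
	char *kbuf = kvzalloc_demo(16u << 20);	/* large, user-sized count */

	if (!kbuf)
		return 1;
	strcpy(kbuf, "sysctl write payload");
	printf("%s\n", kbuf);
	kvfree_demo(kbuf);
	return 0;
}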
+diff --git a/fs/proc/self.c b/fs/proc/self.c
+index cc71ce3466dc0..a4012154e1096 100644
+--- a/fs/proc/self.c
++++ b/fs/proc/self.c
+@@ -20,7 +20,7 @@ static const char *proc_self_get_link(struct dentry *dentry,
+ * Not currently supported. Once we can inherit all of struct pid,
+ * we can allow this.
+ */
+- if (current->flags & PF_KTHREAD)
++ if (current->flags & PF_IO_WORKER)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!tgid)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 602e3a52884d8..3cec6fbef725e 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1210,7 +1210,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ enum clear_refs_types type;
+- struct mmu_gather tlb;
+ int itype;
+ int rv;
+
+@@ -1249,7 +1248,6 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ goto out_unlock;
+ }
+
+- tlb_gather_mmu(&tlb, mm, 0, -1);
+ if (type == CLEAR_REFS_SOFT_DIRTY) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (!(vma->vm_flags & VM_SOFTDIRTY))
+@@ -1258,15 +1256,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ vma_set_page_prot(vma);
+ }
+
++ inc_tlb_flush_pending(mm);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
+ 0, NULL, mm, 0, -1UL);
+ mmu_notifier_invalidate_range_start(&range);
+ }
+ walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
+ &cp);
+- if (type == CLEAR_REFS_SOFT_DIRTY)
++ if (type == CLEAR_REFS_SOFT_DIRTY) {
+ mmu_notifier_invalidate_range_end(&range);
+- tlb_finish_mmu(&tlb, 0, -1);
++ flush_tlb_mm(mm);
++ dec_tlb_flush_pending(mm);
++ }
+ out_unlock:
+ mmap_write_unlock(mm);
+ out_mm:
+diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
+index a553273fbd417..d56681d86d28a 100644
+--- a/fs/proc/thread_self.c
++++ b/fs/proc/thread_self.c
+@@ -17,6 +17,13 @@ static const char *proc_thread_self_get_link(struct dentry *dentry,
+ pid_t pid = task_pid_nr_ns(current, ns);
+ char *name;
+
++ /*
++ * Not currently supported. Once we can inherit all of struct pid,
++ * we can allow this.
++ */
++ if (current->flags & PF_IO_WORKER)
++ return ERR_PTR(-EOPNOTSUPP);
++
+ if (!pid)
+ return ERR_PTR(-ENOENT);
+ name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC);
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 32f64abc277c3..d963ae7902f92 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -269,7 +269,7 @@ static int pstore_compress(const void *in, void *out,
+ {
+ int ret;
+
+- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
++ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
+ return -EINVAL;
+
+ ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
+@@ -671,7 +671,7 @@ static void decompress_record(struct pstore_record *record)
+ int unzipped_len;
+ char *unzipped, *workspace;
+
+- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
++ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
+ return;
+
+ /* Only PSTORE_TYPE_DMESG support compression. */
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index c21106557a37e..b1467f3921c28 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -164,19 +164,24 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
+ (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
+ i_size_read(sb_dqopt(sb)->files[type]));
+- goto out;
++ goto out_free;
+ }
+ if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+ quota_error(sb, "Free block number too big (%u >= %u).",
+ qinfo->dqi_free_blk, qinfo->dqi_blocks);
+- goto out;
++ goto out_free;
+ }
+ if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+ quota_error(sb, "Block with free entry too big (%u >= %u).",
+ qinfo->dqi_free_entry, qinfo->dqi_blocks);
+- goto out;
++ goto out_free;
+ }
+ ret = 0;
++out_free:
++ if (ret) {
++ kfree(info->dqi_priv);
++ info->dqi_priv = NULL;
++ }
+ out:
+ up_read(&dqopt->dqio_sem);
+ return ret;
+diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
+index 51a7c8c2c3f0a..e564d5ff87816 100644
+--- a/fs/ubifs/auth.c
++++ b/fs/ubifs/auth.c
+@@ -327,7 +327,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
+ ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)",
+ hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ);
+ err = -EINVAL;
+- goto out_free_hash;
++ goto out_free_hmac;
+ }
+
+ err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen);
+diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
+index 79801c9a5b874..0f8a6a16421b4 100644
+--- a/fs/ubifs/replay.c
++++ b/fs/ubifs/replay.c
+@@ -559,7 +559,9 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
+ }
+
+ /* authenticate_sleb_hash is split out for stack usage */
+-static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
++static int noinline_for_stack
++authenticate_sleb_hash(struct ubifs_info *c,
++ struct shash_desc *log_hash, u8 *hash)
+ {
+ SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 138b9426c6c18..ddb2ca636c93d 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -838,8 +838,10 @@ static int alloc_wbufs(struct ubifs_info *c)
+ c->jheads[i].wbuf.jhead = i;
+ c->jheads[i].grouped = 1;
+ c->jheads[i].log_hash = ubifs_hash_get_desc(c);
+- if (IS_ERR(c->jheads[i].log_hash))
++ if (IS_ERR(c->jheads[i].log_hash)) {
++ err = PTR_ERR(c->jheads[i].log_hash);
+ goto out;
++ }
+ }
+
+ /*
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index bec47f2d074be..3fe933b1010c3 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -250,6 +250,9 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
+ }
+ inode->i_mode &= ~0222;
+ return i_size_read(inode);
++ case BLK_ZONE_COND_FULL:
++ /* The write pointer of full zones is invalid. */
++ return zi->i_max_size;
+ default:
+ if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+ return zi->i_max_size;
+diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
+index 2fc624a617690..f8a4afb0279a3 100644
+--- a/include/acpi/acexcep.h
++++ b/include/acpi/acexcep.h
+@@ -59,11 +59,11 @@ struct acpi_exception_info {
+
+ #define AE_OK (acpi_status) 0x0000
+
+-#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
+-#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
+-#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
+-#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
+-#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
++#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
++#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML)
++#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
++#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
++#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
+
+ /*
+ * Environmental exceptions
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index b97c628ad91ff..34d8287cd7749 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -828,8 +828,13 @@
+ /* DWARF 4 */ \
+ .debug_types 0 : { *(.debug_types) } \
+ /* DWARF 5 */ \
++ .debug_addr 0 : { *(.debug_addr) } \
++ .debug_line_str 0 : { *(.debug_line_str) } \
++ .debug_loclists 0 : { *(.debug_loclists) } \
+ .debug_macro 0 : { *(.debug_macro) } \
+- .debug_addr 0 : { *(.debug_addr) }
++ .debug_names 0 : { *(.debug_names) } \
++ .debug_rnglists 0 : { *(.debug_rnglists) } \
++ .debug_str_offsets 0 : { *(.debug_str_offsets) }
+
+ /* Stabs debugging sections. */
+ #define STABS_DEBUG \
+@@ -988,12 +993,13 @@
+ #endif
+
+ /*
+- * Clang's -fsanitize=kernel-address and -fsanitize=thread produce
+- * unwanted sections (.eh_frame and .init_array.*), but
+- * CONFIG_CONSTRUCTORS wants to keep any .init_array.* sections.
++ * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
++ * -fsanitize=thread produce unwanted sections (.eh_frame
++ * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
++ * keep any .init_array.* sections.
+ * https://bugs.llvm.org/show_bug.cgi?id=46478
+ */
+-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
++#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
+ # ifdef CONFIG_CONSTRUCTORS
+ # define SANITIZER_DISCARDS \
+ *(.eh_frame)
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 07cb5d15e7439..6e585dbc10df3 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1206,8 +1206,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
+ void bpf_prog_inc(struct bpf_prog *prog);
+ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
+ void bpf_prog_put(struct bpf_prog *prog);
+-void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+- struct bpf_map **used_maps, u32 len);
+
+ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+@@ -1403,7 +1401,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+ /* verify correctness of eBPF program */
+ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ union bpf_attr __user *uattr);
++
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
++#endif
+
+ struct btf *bpf_get_btf_vmlinux(void);
+
+@@ -1673,6 +1674,9 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ return bpf_prog_get_type_dev(ufd, type, false);
+ }
+
++void __bpf_free_used_maps(struct bpf_prog_aux *aux,
++ struct bpf_map **used_maps, u32 len);
++
+ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
+
+ int bpf_prog_offload_compile(struct bpf_prog *prog);
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 61a66fb8ebb34..d2d7f9b6a2761 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -325,6 +325,11 @@ struct dm_target {
+ * whether or not its underlying devices have support.
+ */
+ bool discards_supported:1;
++
++ /*
++ * Set if we need to limit the number of in-flight bios when swapping.
++ */
++ bool limit_swap_bios:1;
+ };
+
+ void *dm_per_bio_data(struct bio *bio, size_t data_size);
+diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
+index 9b93f8584ff7d..8b2b1d68b9545 100644
+--- a/include/linux/entry-kvm.h
++++ b/include/linux/entry-kvm.h
+@@ -46,6 +46,20 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ */
+ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+
++/**
++ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
++ * need to be handled while IRQs are disabled
++ * upon entering to guest.
++ *
++ * Has to be invoked with interrupts disabled before the last call
++ * to xfer_to_guest_mode_work_pending().
++ */
++static inline void xfer_to_guest_mode_prepare(void)
++{
++ lockdep_assert_irqs_disabled();
++ rcu_nocb_flush_deferred_wakeup();
++}
++
+ /**
+ * __xfer_to_guest_mode_work_pending - Check if work is pending
+ *
+diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
+index 0350393465d42..593322c946e63 100644
+--- a/include/linux/eventpoll.h
++++ b/include/linux/eventpoll.h
+@@ -18,7 +18,7 @@ struct file;
+
+ #ifdef CONFIG_EPOLL
+
+-#ifdef CONFIG_CHECKPOINT_RESTORE
++#ifdef CONFIG_KCMP
+ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
+ #endif
+
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 29c27656165b2..5edf2b6608812 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -886,7 +886,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
+ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+ #define __bpf_call_base_args \
+ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+- __bpf_call_base)
++ (void *)__bpf_call_base)
+
+ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+ void bpf_jit_compile(struct bpf_prog *prog);
+diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
+index 1b3371ae81936..9055cb380ee24 100644
+--- a/include/linux/icmpv6.h
++++ b/include/linux/icmpv6.h
+@@ -3,6 +3,7 @@
+ #define _LINUX_ICMPV6_H
+
+ #include <linux/skbuff.h>
++#include <linux/ipv6.h>
+ #include <uapi/linux/icmpv6.h>
+
+ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+@@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+ #if IS_ENABLED(CONFIG_IPV6)
+
+ typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+- const struct in6_addr *force_saddr);
+-#if IS_BUILTIN(CONFIG_IPV6)
++ const struct in6_addr *force_saddr,
++ const struct inet6_skb_parm *parm);
+ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+- const struct in6_addr *force_saddr);
+-static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++ const struct in6_addr *force_saddr,
++ const struct inet6_skb_parm *parm);
++#if IS_BUILTIN(CONFIG_IPV6)
++static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct inet6_skb_parm *parm)
+ {
+- icmp6_send(skb, type, code, info, NULL);
++ icmp6_send(skb, type, code, info, NULL, parm);
+ }
+ static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+ {
+@@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+ return 0;
+ }
+ #else
+-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
++extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct inet6_skb_parm *parm);
+ extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
+ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+ #endif
+
++static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++{
++ __icmpv6_send(skb, type, code, info, IP6CB(skb));
++}
++
+ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ unsigned int data_len);
+
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+ #else
+-#define icmpv6_ndo_send icmpv6_send
++static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
++{
++ struct inet6_skb_parm parm = { 0 };
++ __icmpv6_send(skb_in, type, code, info, &parm);
++}
+ #endif
+
+ #else
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index efa96263b81b3..d63d3e9cc7b67 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -170,7 +170,7 @@ enum iommu_dev_features {
+ * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
+ *
+ * @start: IOVA representing the start of the range to be flushed
+- * @end: IOVA representing the end of the range to be flushed (exclusive)
++ * @end: IOVA representing the end of the range to be flushed (inclusive)
+ * @pgsize: The interval at which to perform the flush
+ *
+ * This structure is intended to be updated by multiple calls to the
+@@ -538,7 +538,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+ {
+- unsigned long start = iova, end = start + size;
++ unsigned long start = iova, end = start + size - 1;
+
+ /*
+ * If the new page is disjoint from the current range or is mapped at
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index dda61d150a138..f514a7dd8c9cf 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -84,7 +84,6 @@ struct ipv6_params {
+ __s32 autoconf;
+ };
+ extern struct ipv6_params ipv6_defaults;
+-#include <linux/icmpv6.h>
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index 9e93bef529680..5f61389f5f361 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -300,6 +300,11 @@ struct kimage {
+ /* Information for loading purgatory */
+ struct purgatory_info purgatory_info;
+ #endif
++
++#ifdef CONFIG_IMA_KEXEC
++ /* Virtual address of IMA measurement buffer for kexec syscall */
++ void *ima_buffer;
++#endif
+ };
+
+ /* kexec interface functions */
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 0f2e24f13c2bd..eed3ce139a32e 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -289,6 +289,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
+ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
+ #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
++#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
+
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index 0d6cf64c8bb12..3c755f6eaefd8 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -360,9 +360,11 @@ extern atomic_t kgdb_active;
+ extern bool dbg_is_early;
+ extern void __init dbg_late_init(void);
+ extern void kgdb_panic(const char *msg);
++extern void kgdb_free_init_mem(void);
+ #else /* ! CONFIG_KGDB */
+ #define in_dbg_master() (0)
+ #define dbg_late_init()
+ static inline void kgdb_panic(const char *msg) {}
++static inline void kgdb_free_init_mem(void) { }
+ #endif /* ! CONFIG_KGDB */
+ #endif /* _KGDB_H_ */
+diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
+index c941b73773216..2fcc01891b474 100644
+--- a/include/linux/khugepaged.h
++++ b/include/linux/khugepaged.h
+@@ -3,6 +3,7 @@
+ #define _LINUX_KHUGEPAGED_H
+
+ #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
++#include <linux/shmem_fs.h>
+
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -57,6 +58,7 @@ static inline int khugepaged_enter(struct vm_area_struct *vma,
+ {
+ if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
+ if ((khugepaged_always() ||
++ (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
+ (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+ !(vm_flags & VM_NOHUGEPAGE) &&
+ !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+diff --git a/include/linux/memremap.h b/include/linux/memremap.h
+index 79c49e7f5c304..f5b464daeeca5 100644
+--- a/include/linux/memremap.h
++++ b/include/linux/memremap.h
+@@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+ void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
+ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ struct dev_pagemap *pgmap);
++bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
+
+ unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
+ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
+@@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+ return NULL;
+ }
+
++static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
++{
++ return false;
++}
++
+ static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+ {
+ return 0;
+diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
+index 4283b5b33e040..2b85b9deb03ae 100644
+--- a/include/linux/mfd/rohm-generic.h
++++ b/include/linux/mfd/rohm-generic.h
+@@ -20,14 +20,12 @@ struct rohm_regmap_dev {
+ struct regmap *regmap;
+ };
+
+-enum {
+- ROHM_DVS_LEVEL_UNKNOWN,
+- ROHM_DVS_LEVEL_RUN,
+- ROHM_DVS_LEVEL_IDLE,
+- ROHM_DVS_LEVEL_SUSPEND,
+- ROHM_DVS_LEVEL_LPSR,
+- ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
+-};
++#define ROHM_DVS_LEVEL_RUN BIT(0)
++#define ROHM_DVS_LEVEL_IDLE BIT(1)
++#define ROHM_DVS_LEVEL_SUSPEND BIT(2)
++#define ROHM_DVS_LEVEL_LPSR BIT(3)
++#define ROHM_DVS_LEVEL_VALID_AMOUNT 4
++#define ROHM_DVS_LEVEL_UNKNOWN 0
+
+ /**
+ * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index fd02c5fa60cb1..36c2119de7022 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -110,8 +110,10 @@ static inline void rcu_user_exit(void) { }
+
+ #ifdef CONFIG_RCU_NOCB_CPU
+ void rcu_init_nohz(void);
++void rcu_nocb_flush_deferred_wakeup(void);
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static inline void rcu_init_nohz(void) { }
++static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+ #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
+ /**
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 70085ca1a3fc9..def5c62c93b3b 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -213,7 +213,8 @@ struct page_vma_mapped_walk {
+
+ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
+ {
+- if (pvmw->pte)
++ /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
++ if (pvmw->pte && !PageHuge(pvmw->page))
+ pte_unmap(pvmw->pte);
+ if (pvmw->ptl)
+ spin_unlock(pvmw->ptl);
+diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
+index f0b01b728640d..d08039d65825e 100644
+--- a/include/linux/soundwire/sdw.h
++++ b/include/linux/soundwire/sdw.h
+@@ -1005,6 +1005,8 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
+
+ int sdw_read(struct sdw_slave *slave, u32 addr);
+ int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
++int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
++int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
+ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 8f4ff39f51e7d..804a3f69bbd93 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -397,6 +397,10 @@ static inline u32 tpm2_rc_value(u32 rc)
+ #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+
+ extern int tpm_is_tpm2(struct tpm_chip *chip);
++extern __must_check int tpm_try_get_ops(struct tpm_chip *chip);
++extern void tpm_put_ops(struct tpm_chip *chip);
++extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
++ size_t min_rsp_body_length, const char *desc);
+ extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
+ struct tpm_digest *digest);
+ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+@@ -410,7 +414,6 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
+ {
+ return -ENODEV;
+ }
+-
+ static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx,
+ struct tpm_digest *digest)
+ {
+diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
+index b1e6043e99175..572a079761165 100644
+--- a/include/linux/tty_ldisc.h
++++ b/include/linux/tty_ldisc.h
+@@ -185,7 +185,8 @@ struct tty_ldisc_ops {
+ void (*close)(struct tty_struct *);
+ void (*flush_buffer)(struct tty_struct *tty);
+ ssize_t (*read)(struct tty_struct *tty, struct file *file,
+- unsigned char __user *buf, size_t nr);
++ unsigned char *buf, size_t nr,
++ void **cookie, unsigned long offset);
+ ssize_t (*write)(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr);
+ int (*ioctl)(struct tty_struct *tty, struct file *file,
+diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
+index 4807ca4d52e03..2a430e713ce51 100644
+--- a/include/linux/zsmalloc.h
++++ b/include/linux/zsmalloc.h
+@@ -35,7 +35,7 @@ enum zs_mapmode {
+
+ struct zs_pool_stats {
+ /* How many pages were migrated (freed) */
+- unsigned long pages_compacted;
++ atomic_long_t pages_compacted;
+ };
+
+ struct zs_pool;
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index 55dab604861fe..2bf3092ae7ecc 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -166,6 +166,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags);
++void tcf_idr_insert_many(struct tc_action *actions[]);
+ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+@@ -186,10 +187,13 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr, int bind,
+ struct tc_action *actions[], size_t *attr_size,
+ bool rtnl_held, struct netlink_ext_ack *extack);
++struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
++ bool rtnl_held,
++ struct netlink_ext_ack *extack);
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind,
+- bool rtnl_held,
++ struct tc_action_ops *ops, bool rtnl_held,
+ struct netlink_ext_ack *extack);
+ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ int ref, bool terse);
+diff --git a/include/net/icmp.h b/include/net/icmp.h
+index 9ac2d2672a938..fd84adc479633 100644
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -46,7 +46,11 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+ #else
+-#define icmp_ndo_send icmp_send
++static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++ struct ip_options opts = { 0 };
++ __icmp_send(skb_in, type, code, info, &opts);
++}
+ #endif
+
+ int icmp_rcv(struct sk_buff *skb);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 25bbada379c46..244208f6f6c2a 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1431,8 +1431,13 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied);
+ */
+ static inline bool tcp_rmem_pressure(const struct sock *sk)
+ {
+- int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+- int threshold = rcvbuf - (rcvbuf >> 3);
++ int rcvbuf, threshold;
++
++ if (tcp_under_memory_pressure(sk))
++ return true;
++
++ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
++ threshold = rcvbuf - (rcvbuf >> 3);
+
+ return atomic_read(&sk->sk_rmem_alloc) > threshold;
+ }
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index 4e2d61e8fb1ed..e6a43163ab5b7 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -391,10 +391,6 @@ struct sas_ha_struct {
+ int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
+ * their siblings when forming wide ports */
+
+- /* LLDD calls these to notify the class of an event. */
+- int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
+- int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
+-
+ void *lldd_ha; /* not touched by sas class code */
+
+ struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
+@@ -706,4 +702,11 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev);
+
+ int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
+
++int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event);
++int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
++int sas_notify_port_event_gfp(struct asd_sas_phy *phy, enum port_event event,
++ gfp_t gfp_flags);
++int sas_notify_phy_event_gfp(struct asd_sas_phy *phy, enum phy_event event,
++ gfp_t gfp_flags);
++
+ #endif /* _SASLIB_H_ */
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index 723c8e23ca87d..5f42a14481bd4 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -1036,9 +1036,9 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
+ * Not all combinations are valid, and different SoCs may support different
+ * combinations of layout and options.
+ */
+-#define __fourcc_mod_amlogic_layout_mask 0xf
++#define __fourcc_mod_amlogic_layout_mask 0xff
+ #define __fourcc_mod_amlogic_options_shift 8
+-#define __fourcc_mod_amlogic_options_mask 0xf
++#define __fourcc_mod_amlogic_options_mask 0xff
+
+ #define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ fourcc_mod_code(AMLOGIC, \
+diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
+index b49fbf2bdc408..1c064627e6c33 100644
+--- a/include/uapi/drm/drm_mode.h
++++ b/include/uapi/drm/drm_mode.h
+@@ -414,15 +414,12 @@ enum drm_mode_subconnector {
+ *
+ * If the @count_modes field is set to zero, the kernel will perform a forced
+ * probe on the connector to refresh the connector status, modes and EDID.
+- * A forced-probe can be slow and the ioctl will block. A force-probe can cause
+- * flickering and temporary freezes, so it should not be performed
+- * automatically.
++ * A forced-probe can be slow, might cause flickering and the ioctl will block.
+ *
+- * User-space shouldn't need to force-probe connectors in general: the kernel
+- * will automatically take care of probing connectors that don't support
+- * hot-plug detection when appropriate. However, user-space may force-probe
+- * connectors on user request (e.g. clicking a "Scan connectors" button, or
+- * opening a UI to manage screens).
++ * User-space needs to force-probe connectors to ensure their metadata is
++ * up-to-date at startup and after receiving a hot-plug event. User-space
++ * may perform a forced-probe when the user explicitly requests it. User-space
++ * shouldn't perform a forced-probe in other situations.
+ */
+ struct drm_mode_get_connector {
+ /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */
+diff --git a/init/Kconfig b/init/Kconfig
+index 29ad683250288..b7d3c6a12196f 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1193,6 +1193,7 @@ endif # NAMESPACES
+ config CHECKPOINT_RESTORE
+ bool "Checkpoint/restore support"
+ select PROC_CHILDREN
++ select KCMP
+ default n
+ help
+ Enables additional kernel features in a sake of checkpoint/restore.
+@@ -1736,6 +1737,16 @@ config ARCH_HAS_MEMBARRIER_CALLBACKS
+ config ARCH_HAS_MEMBARRIER_SYNC_CORE
+ bool
+
++config KCMP
++ bool "Enable kcmp() system call" if EXPERT
++ help
++ Enable the kernel resource comparison system call. It provides
++ user-space with the ability to compare two processes to see if they
++ share a common resource, such as a file descriptor or even virtual
++ memory space.
++
++ If unsure, say N.
++
+ config RSEQ
+ bool "Enable rseq() system call" if EXPERT
+ default y
+diff --git a/init/main.c b/init/main.c
+index a626e78dbf061..aeef291bf28df 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1423,6 +1423,7 @@ static int __ref kernel_init(void *unused)
+ async_synchronize_full();
+ kprobe_free_init_mem();
+ ftrace_free_init_mem();
++ kgdb_free_init_mem();
+ free_initmem();
+ mark_readonly();
+
+diff --git a/kernel/Makefile b/kernel/Makefile
+index aa7368c7eabf3..320f1f3941b79 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -51,7 +51,7 @@ obj-y += livepatch/
+ obj-y += dma/
+ obj-y += entry/
+
+-obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
++obj-$(CONFIG_KCMP) += kcmp.o
+ obj-$(CONFIG_FREEZER) += freezer.o
+ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
+index 5454161407f1f..a0d9eade9c804 100644
+--- a/kernel/bpf/bpf_iter.c
++++ b/kernel/bpf/bpf_iter.c
+@@ -287,7 +287,7 @@ int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
+ {
+ struct bpf_iter_target_info *tinfo;
+
+- tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
++ tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
+ if (!tinfo)
+ return -ENOMEM;
+
+diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
+index 1b6b9349cb857..d99e89f113c43 100644
+--- a/kernel/bpf/bpf_lru_list.c
++++ b/kernel/bpf/bpf_lru_list.c
+@@ -502,13 +502,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
+ static void bpf_common_lru_push_free(struct bpf_lru *lru,
+ struct bpf_lru_node *node)
+ {
++ u8 node_type = READ_ONCE(node->type);
+ unsigned long flags;
+
+- if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
+- WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
++ if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
++ WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
+ return;
+
+- if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
++ if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
+ struct bpf_lru_locallist *loc_l;
+
+ loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index f6e9c68afdd42..85d9d1b72a33a 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -802,9 +802,7 @@ static int dev_map_notification(struct notifier_block *notifier,
+ break;
+
+ /* will be freed in free_netdev() */
+- netdev->xdp_bulkq =
+- __alloc_percpu_gfp(sizeof(struct xdp_dev_bulk_queue),
+- sizeof(void *), GFP_ATOMIC);
++ netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
+ if (!netdev->xdp_bulkq)
+ return NOTIFY_BAD;
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 20babdd06278f..33683eafea90e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -4834,8 +4834,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ subprog);
+ clear_caller_saved_regs(env, caller->regs);
+
+- /* All global functions return SCALAR_VALUE */
++ /* All global functions return a 64-bit SCALAR_VALUE */
+ mark_reg_unknown(env, caller->regs, BPF_REG_0);
++ caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
+
+ /* continue with next insn after call */
+ return 0;
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index af6e8b4fb3599..c0bb31e683e99 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -456,6 +456,17 @@ setundefined:
+ return 0;
+ }
+
++void kgdb_free_init_mem(void)
++{
++ int i;
++
++ /* Clear init memory breakpoints. */
++ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
++ if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
++ kgdb_break[i].state = BP_UNDEFINED;
++ }
++}
++
+ #ifdef CONFIG_KGDB_KDB
+ void kdb_dump_stack_on_cpu(int cpu)
+ {
+diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
+index a4281fb99299e..81874213b0fe9 100644
+--- a/kernel/debug/kdb/kdb_private.h
++++ b/kernel/debug/kdb/kdb_private.h
+@@ -230,7 +230,7 @@ extern struct task_struct *kdb_curr_task(int);
+
+ #define kdb_task_has_cpu(p) (task_curr(p))
+
+-#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
++#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
+
+ extern void *debug_kmalloc(size_t size, gfp_t flags);
+ extern void debug_kfree(void *);
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index f9d491b17b78b..1ef9b15ceec9b 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -184,6 +184,10 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ * enabled above.
+ */
+ local_irq_disable_exit_to_user();
++
++ /* Check if any of the above work has queued a deferred wakeup */
++ rcu_nocb_flush_deferred_wakeup();
++
+ ti_work = READ_ONCE(current_thread_info()->flags);
+ }
+
+@@ -197,6 +201,9 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
+
+ lockdep_assert_irqs_disabled();
+
++ /* Flush pending rcuog wakeup before the last need_resched() check */
++ rcu_nocb_flush_deferred_wakeup();
++
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 3994a217bde76..3bf98db9c702d 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -12,7 +12,6 @@
+ #include <linux/moduleparam.h>
+ #include <linux/percpu.h>
+ #include <linux/preempt.h>
+-#include <linux/random.h>
+ #include <linux/sched.h>
+ #include <linux/uaccess.h>
+
+@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
+ static DEFINE_PER_CPU(long, kcsan_skip);
+
+ /* For kcsan_prandom_u32_max(). */
+-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
++static DEFINE_PER_CPU(u32, kcsan_rand_state);
+
+ static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
+ size_t size,
+@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
+ }
+
+ /*
+- * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
+- * for more details.
+- *
+- * The open-coded version here is using only safe primitives for all contexts
+- * where we can have KCSAN instrumentation. In particular, we cannot use
+- * prandom_u32() directly, as its tracepoint could cause recursion.
++ * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
++ * congruential generator, using constants from "Numerical Recipes".
+ */
+ static u32 kcsan_prandom_u32_max(u32 ep_ro)
+ {
+- struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
+- const u32 res = prandom_u32_state(state);
++ u32 state = this_cpu_read(kcsan_rand_state);
++
++ state = 1664525 * state + 1013904223;
++ this_cpu_write(kcsan_rand_state, state);
+
+- put_cpu_var(kcsan_rand_state);
+- return (u32)(((u64) res * ep_ro) >> 32);
++ return state % ep_ro;
+ }
+
+ static inline void reset_kcsan_skip(void)
+@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
+
+ void __init kcsan_init(void)
+ {
++ int cpu;
++
+ BUG_ON(!in_task());
+
+ kcsan_debugfs_init();
+- prandom_seed_full_state(&kcsan_rand_state);
++
++ for_each_possible_cpu(cpu)
++ per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
+
+ /*
+ * We are in the init task, and no other tasks should be running;
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index b02086d704923..5c3447cf7ad58 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -166,6 +166,11 @@ void kimage_file_post_load_cleanup(struct kimage *image)
+ vfree(pi->sechdrs);
+ pi->sechdrs = NULL;
+
++#ifdef CONFIG_IMA_KEXEC
++ vfree(image->ima_buffer);
++ image->ima_buffer = NULL;
++#endif /* CONFIG_IMA_KEXEC */
++
+ /* See if architecture has anything to cleanup post load */
+ arch_kimage_file_post_load_cleanup(image);
+
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index d5a3eb74a6574..779d8322e307d 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -861,7 +861,6 @@ out:
+ cpus_read_unlock();
+ }
+
+-#ifdef CONFIG_SYSCTL
+ static void optimize_all_kprobes(void)
+ {
+ struct hlist_head *head;
+@@ -887,6 +886,7 @@ out:
+ mutex_unlock(&kprobe_mutex);
+ }
+
++#ifdef CONFIG_SYSCTL
+ static void unoptimize_all_kprobes(void)
+ {
+ struct hlist_head *head;
+@@ -2497,18 +2497,14 @@ static int __init init_kprobes(void)
+ }
+ }
+
+-#if defined(CONFIG_OPTPROBES)
+-#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+- /* Init kprobe_optinsn_slots */
+- kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+-#endif
+- /* By default, kprobes can be optimized */
+- kprobes_allow_optimization = true;
+-#endif
+-
+ /* By default, kprobes are armed */
+ kprobes_all_disarmed = false;
+
++#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
++ /* Init kprobe_optinsn_slots for allocation */
++ kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
++#endif
++
+ err = arch_init_kprobes();
+ if (!err)
+ err = register_die_notifier(&kprobe_exceptions_nb);
+@@ -2523,6 +2519,21 @@ static int __init init_kprobes(void)
+ }
+ early_initcall(init_kprobes);
+
++#if defined(CONFIG_OPTPROBES)
++static int __init init_optprobes(void)
++{
++ /*
++ * Enable kprobe optimization - this kicks the optimizer which
++ * depends on synchronize_rcu_tasks() and ksoftirqd, that is
++ * not spawned in early initcall. So delay the optimization.
++ */
++ optimize_all_kprobes();
++
++ return 0;
++}
++subsys_initcall(init_optprobes);
++#endif
++
+ #ifdef CONFIG_DEBUG_FS
+ static void report_probe(struct seq_file *pi, struct kprobe *p,
+ const char *sym, int offset, char *modname, struct kprobe *pp)
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index bdaf4829098c0..780012eb2f3fe 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3707,7 +3707,7 @@ static void
+ print_usage_bug(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
+ {
+- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
++ if (!debug_locks_off() || debug_locks_silent)
+ return;
+
+ pr_warn("\n");
+@@ -3748,6 +3748,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
+ enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
+ {
+ if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
++ graph_unlock();
+ print_usage_bug(curr, this, bad_bit, new_bit);
+ return 0;
+ }
+diff --git a/kernel/module.c b/kernel/module.c
+index 4bf30e4b3eaaa..1e5aad8123104 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2348,6 +2348,21 @@ static int verify_exported_symbols(struct module *mod)
+ return 0;
+ }
+
++static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
++{
++ /*
++ * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
++ * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
++ * i386 has a similar problem but may not deserve a fix.
++ *
++ * If we ever have to ignore many symbols, consider refactoring the code to
++ * only warn if referenced by a relocation.
++ */
++ if (emachine == EM_386 || emachine == EM_X86_64)
++ return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
++ return false;
++}
++
+ /* Change all symbols so that st_value encodes the pointer directly. */
+ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ {
+@@ -2395,8 +2410,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ break;
+ }
+
+- /* Ok if weak. */
+- if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
++ /* Ok if weak or ignored. */
++ if (!ksym &&
++ (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
++ ignore_undef_symbol(info->hdr->e_machine, name)))
+ break;
+
+ ret = PTR_ERR(ksym) ?: -ENOENT;
+@@ -2964,7 +2981,7 @@ static int module_sig_check(struct load_info *info, int flags)
+ }
+
+ if (is_module_sig_enforced()) {
+- pr_notice("%s: loading of %s is rejected\n", info->name, reason);
++ pr_notice("Loading of %s is rejected\n", reason);
+ return -EKEYREJECTED;
+ }
+
+@@ -2977,9 +2994,33 @@ static int module_sig_check(struct load_info *info, int flags)
+ }
+ #endif /* !CONFIG_MODULE_SIG */
+
+-/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
+-static int elf_header_check(struct load_info *info)
++static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
++{
++ unsigned long secend;
++
++ /*
++ * Check for both overflow and offset/size being
++ * too large.
++ */
++ secend = shdr->sh_offset + shdr->sh_size;
++ if (secend < shdr->sh_offset || secend > info->len)
++ return -ENOEXEC;
++
++ return 0;
++}
++
++/*
++ * Sanity checks against invalid binaries, wrong arch, weird elf version.
++ *
++ * Also do basic validity checks against section offsets and sizes, the
++ * section name string table, and the indices used for it (sh_name).
++ */
++static int elf_validity_check(struct load_info *info)
+ {
++ unsigned int i;
++ Elf_Shdr *shdr, *strhdr;
++ int err;
++
+ if (info->len < sizeof(*(info->hdr)))
+ return -ENOEXEC;
+
+@@ -2989,11 +3030,78 @@ static int elf_header_check(struct load_info *info)
+ || info->hdr->e_shentsize != sizeof(Elf_Shdr))
+ return -ENOEXEC;
+
++ /*
++ * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
++ * known and small. So e_shnum * sizeof(Elf_Shdr)
++ * will not overflow unsigned long on any platform.
++ */
+ if (info->hdr->e_shoff >= info->len
+ || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
+ info->len - info->hdr->e_shoff))
+ return -ENOEXEC;
+
++ info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
++
++ /*
++ * Verify if the section name table index is valid.
++ */
++ if (info->hdr->e_shstrndx == SHN_UNDEF
++ || info->hdr->e_shstrndx >= info->hdr->e_shnum)
++ return -ENOEXEC;
++
++ strhdr = &info->sechdrs[info->hdr->e_shstrndx];
++ err = validate_section_offset(info, strhdr);
++ if (err < 0)
++ return err;
++
++ /*
++ * The section name table must be NUL-terminated, as required
++ * by the spec. This makes strcmp and pr_* calls that access
++ * strings in the section safe.
++ */
++ info->secstrings = (void *)info->hdr + strhdr->sh_offset;
++ if (info->secstrings[strhdr->sh_size - 1] != '\0')
++ return -ENOEXEC;
++
++ /*
++ * The code assumes that section 0 has a length of zero and
++ * an addr of zero, so check for it.
++ */
++ if (info->sechdrs[0].sh_type != SHT_NULL
++ || info->sechdrs[0].sh_size != 0
++ || info->sechdrs[0].sh_addr != 0)
++ return -ENOEXEC;
++
++ for (i = 1; i < info->hdr->e_shnum; i++) {
++ shdr = &info->sechdrs[i];
++ switch (shdr->sh_type) {
++ case SHT_NULL:
++ case SHT_NOBITS:
++ continue;
++ case SHT_SYMTAB:
++ if (shdr->sh_link == SHN_UNDEF
++ || shdr->sh_link >= info->hdr->e_shnum)
++ return -ENOEXEC;
++ fallthrough;
++ default:
++ err = validate_section_offset(info, shdr);
++ if (err < 0) {
++ pr_err("Invalid ELF section in module (section %u type %u)\n",
++ i, shdr->sh_type);
++ return err;
++ }
++
++ if (shdr->sh_flags & SHF_ALLOC) {
++ if (shdr->sh_name >= strhdr->sh_size) {
++ pr_err("Invalid ELF section name in module (section %u type %u)\n",
++ i, shdr->sh_type);
++ return -ENOEXEC;
++ }
++ }
++ break;
++ }
++ }
++
+ return 0;
+ }
+
+@@ -3095,11 +3203,6 @@ static int rewrite_section_headers(struct load_info *info, int flags)
+
+ for (i = 1; i < info->hdr->e_shnum; i++) {
+ Elf_Shdr *shdr = &info->sechdrs[i];
+- if (shdr->sh_type != SHT_NOBITS
+- && info->len < shdr->sh_offset + shdr->sh_size) {
+- pr_err("Module len %lu truncated\n", info->len);
+- return -ENOEXEC;
+- }
+
+ /*
+ * Mark all sections sh_addr with their address in the
+@@ -3133,11 +3236,6 @@ static int setup_load_info(struct load_info *info, int flags)
+ {
+ unsigned int i;
+
+- /* Set up the convenience variables */
+- info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
+- info->secstrings = (void *)info->hdr
+- + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
+-
+ /* Try to find a name early so we can log errors with a module name */
+ info->index.info = find_sec(info, ".modinfo");
+ if (info->index.info)
+@@ -3894,26 +3992,50 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ long err = 0;
+ char *after_dashes;
+
+- err = elf_header_check(info);
++ /*
++ * Do the signature check (if any) first. All that
++ * the signature check needs is info->len, it does
++ * not need any of the section info. That can be
++ * set up later. This will minimize the chances
++ * of a corrupt module causing problems before
++ * we even get to the signature check.
++ *
++ * The check will also adjust info->len by stripping
++ * off the sig length at the end of the module, making
++ * checks against info->len more correct.
++ */
++ err = module_sig_check(info, flags);
++ if (err)
++ goto free_copy;
++
++ /*
++ * Do basic sanity checks against the ELF header and
++ * sections.
++ */
++ err = elf_validity_check(info);
+ if (err) {
+- pr_err("Module has invalid ELF header\n");
++ pr_err("Module has invalid ELF structures\n");
+ goto free_copy;
+ }
+
++ /*
++ * Everything checks out, so set up the section info
++ * in the info structure.
++ */
+ err = setup_load_info(info, flags);
+ if (err)
+ goto free_copy;
+
++ /*
++ * Now that we know we have the correct module name, check
++ * if it's blacklisted.
++ */
+ if (blacklisted(info->name)) {
+ err = -EPERM;
+ pr_err("Module %s is blacklisted\n", info->name);
+ goto free_copy;
+ }
+
+- err = module_sig_check(info, flags);
+- if (err)
+- goto free_copy;
+-
+ err = rewrite_section_headers(info, flags);
+ if (err)
+ goto free_copy;
+diff --git a/kernel/module_signature.c b/kernel/module_signature.c
+index 4224a1086b7d8..00132d12487cd 100644
+--- a/kernel/module_signature.c
++++ b/kernel/module_signature.c
+@@ -25,7 +25,7 @@ int mod_check_sig(const struct module_signature *ms, size_t file_len,
+ return -EBADMSG;
+
+ if (ms->id_type != PKEY_ID_PKCS7) {
+- pr_err("%s: Module is not signed with expected PKCS#7 message\n",
++ pr_err("%s: not signed with expected PKCS#7 message\n",
+ name);
+ return -ENOPKG;
+ }
+diff --git a/kernel/module_signing.c b/kernel/module_signing.c
+index 9d9fc678c91d6..8723ae70ea1fe 100644
+--- a/kernel/module_signing.c
++++ b/kernel/module_signing.c
+@@ -30,7 +30,7 @@ int mod_verify_sig(const void *mod, struct load_info *info)
+
+ memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
+
+- ret = mod_check_sig(&ms, modlen, info->name);
++ ret = mod_check_sig(&ms, modlen, "module");
+ if (ret)
+ return ret;
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 5a95c688621fa..575a34b88936f 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -735,9 +735,9 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ logbuf_lock_irq();
+ }
+
+- if (user->seq < prb_first_valid_seq(prb)) {
++ if (r->info->seq != user->seq) {
+ /* our last seen message is gone, return error and reset */
+- user->seq = prb_first_valid_seq(prb);
++ user->seq = r->info->seq;
+ ret = -EPIPE;
+ logbuf_unlock_irq();
+ goto out;
+@@ -812,6 +812,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ {
+ struct devkmsg_user *user = file->private_data;
++ struct printk_info info;
+ __poll_t ret = 0;
+
+ if (!user)
+@@ -820,9 +821,9 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ poll_wait(file, &log_wait, wait);
+
+ logbuf_lock_irq();
+- if (prb_read_valid(prb, user->seq, NULL)) {
++ if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
+ /* return error when data has vanished underneath us */
+- if (user->seq < prb_first_valid_seq(prb))
++ if (info.seq != user->seq)
+ ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
+ else
+ ret = EPOLLIN|EPOLLRDNORM;
+@@ -1559,6 +1560,7 @@ static void syslog_clear(void)
+
+ int do_syslog(int type, char __user *buf, int len, int source)
+ {
++ struct printk_info info;
+ bool clear = false;
+ static int saved_console_loglevel = LOGLEVEL_DEFAULT;
+ int error;
+@@ -1629,9 +1631,14 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+ logbuf_lock_irq();
+- if (syslog_seq < prb_first_valid_seq(prb)) {
++ if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
++ /* No unread messages. */
++ logbuf_unlock_irq();
++ return 0;
++ }
++ if (info.seq != syslog_seq) {
+ /* messages are gone, move to first one */
+- syslog_seq = prb_first_valid_seq(prb);
++ syslog_seq = info.seq;
+ syslog_partial = 0;
+ }
+ if (source == SYSLOG_FROM_PROC) {
+@@ -1643,7 +1650,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ error = prb_next_seq(prb) - syslog_seq;
+ } else {
+ bool time = syslog_partial ? syslog_time : printk_time;
+- struct printk_info info;
+ unsigned int line_count;
+ u64 seq;
+
+@@ -3429,9 +3435,11 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ goto out;
+
+ logbuf_lock_irqsave(flags);
+- if (dumper->cur_seq < prb_first_valid_seq(prb)) {
+- /* messages are gone, move to first available one */
+- dumper->cur_seq = prb_first_valid_seq(prb);
++ if (prb_read_valid_info(prb, dumper->cur_seq, &info, NULL)) {
++ if (info.seq != dumper->cur_seq) {
++ /* messages are gone, move to first available one */
++ dumper->cur_seq = info.seq;
++ }
+ }
+
+ /* last entry */
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index a0e6f746de6c4..2e9e3ed7d63ef 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -45,6 +45,8 @@ struct printk_safe_seq_buf {
+ static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+ static DEFINE_PER_CPU(int, printk_context);
+
++static DEFINE_RAW_SPINLOCK(safe_read_lock);
++
+ #ifdef CONFIG_PRINTK_NMI
+ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+ #endif
+@@ -180,8 +182,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s)
+ */
+ static void __printk_safe_flush(struct irq_work *work)
+ {
+- static raw_spinlock_t read_lock =
+- __RAW_SPIN_LOCK_INITIALIZER(read_lock);
+ struct printk_safe_seq_buf *s =
+ container_of(work, struct printk_safe_seq_buf, work);
+ unsigned long flags;
+@@ -195,7 +195,7 @@ static void __printk_safe_flush(struct irq_work *work)
+ * different CPUs. This is especially important when printing
+ * a backtrace.
+ */
+- raw_spin_lock_irqsave(&read_lock, flags);
++ raw_spin_lock_irqsave(&safe_read_lock, flags);
+
+ i = 0;
+ more:
+@@ -232,7 +232,7 @@ more:
+
+ out:
+ report_message_lost(s);
+- raw_spin_unlock_irqrestore(&read_lock, flags);
++ raw_spin_unlock_irqrestore(&safe_read_lock, flags);
+ }
+
+ /**
+@@ -278,6 +278,14 @@ void printk_safe_flush_on_panic(void)
+ raw_spin_lock_init(&logbuf_lock);
+ }
+
++ if (raw_spin_is_locked(&safe_read_lock)) {
++ if (num_online_cpus() > 1)
++ return;
++
++ debug_locks_off();
++ raw_spin_lock_init(&safe_read_lock);
++ }
++
+ printk_safe_flush();
+ }
+
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 40e5e3dd253e0..ce17b8477442f 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -644,7 +644,6 @@ static noinstr void rcu_eqs_enter(bool user)
+ trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ rdp = this_cpu_ptr(&rcu_data);
+- do_nocb_deferred_wakeup(rdp);
+ rcu_prepare_for_idle();
+ rcu_preempt_deferred_qs(current);
+
+@@ -678,6 +677,50 @@ void rcu_idle_enter(void)
+ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+
+ #ifdef CONFIG_NO_HZ_FULL
++
++#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
++/*
++ * An empty function that will trigger a reschedule on
++ * IRQ tail once IRQs get re-enabled on userspace/guest resume.
++ */
++static void late_wakeup_func(struct irq_work *work)
++{
++}
++
++static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
++ IRQ_WORK_INIT(late_wakeup_func);
++
++/*
++ * If either:
++ *
++ * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
++ * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
++ *
++ * In these cases the late RCU wake ups aren't supported in the resched loops and our
++ * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
++ * get re-enabled again.
++ */
++noinstr static void rcu_irq_work_resched(void)
++{
++ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
++
++ if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
++ return;
++
++ if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
++ return;
++
++ instrumentation_begin();
++ if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
++ irq_work_queue(this_cpu_ptr(&late_wakeup_work));
++ }
++ instrumentation_end();
++}
++
++#else
++static inline void rcu_irq_work_resched(void) { }
++#endif
++
+ /**
+ * rcu_user_enter - inform RCU that we are resuming userspace.
+ *
+@@ -692,8 +735,16 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
+ noinstr void rcu_user_enter(void)
+ {
+ lockdep_assert_irqs_disabled();
++
++ /*
++ * Other than generic entry implementation, we may be past the last
++ * rescheduling opportunity in the entry code. Trigger a self IPI
++ * that will fire and reschedule once we resume in user/guest mode.
++ */
++ rcu_irq_work_resched();
+ rcu_eqs_enter(true);
+ }
++
+ #endif /* CONFIG_NO_HZ_FULL */
+
+ /**
+diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
+index 7708ed161f4a2..9226f4021a36d 100644
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -433,7 +433,7 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+ unsigned long flags);
+ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp);
+ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
+ static void rcu_spawn_cpu_nocb_kthread(int cpu);
+ static void __init rcu_spawn_nocb_kthreads(void);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 7e291ce0a1d6f..cdc1b7651c039 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1631,8 +1631,8 @@ bool rcu_is_nocb_cpu(int cpu)
+ * Kick the GP kthread for this NOCB group. Caller holds ->nocb_lock
+ * and this function releases it.
+ */
+-static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+- unsigned long flags)
++static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
++ unsigned long flags)
+ __releases(rdp->nocb_lock)
+ {
+ bool needwake = false;
+@@ -1643,7 +1643,7 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
+ TPS("AlreadyAwake"));
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+- return;
++ return false;
+ }
+ del_timer(&rdp->nocb_timer);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+@@ -1656,6 +1656,8 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
+ raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
+ if (needwake)
+ wake_up_process(rdp_gp->nocb_gp_kthread);
++
++ return needwake;
+ }
+
+ /*
+@@ -2152,20 +2154,23 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+ }
+
+ /* Do a deferred wakeup of rcu_nocb_kthread(). */
+-static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
+ {
+ unsigned long flags;
+ int ndw;
++ int ret;
+
+ rcu_nocb_lock_irqsave(rdp, flags);
+ if (!rcu_nocb_need_deferred_wakeup(rdp)) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+- return;
++ return false;
+ }
+ ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+- wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
++ ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
++
++ return ret;
+ }
+
+ /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
+@@ -2181,12 +2186,19 @@ static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
+ * This means we do an inexact common-case check. Note that if
+ * we miss, ->nocb_timer will eventually clean things up.
+ */
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ {
+ if (rcu_nocb_need_deferred_wakeup(rdp))
+- do_nocb_deferred_wakeup_common(rdp);
++ return do_nocb_deferred_wakeup_common(rdp);
++ return false;
+ }
+
++void rcu_nocb_flush_deferred_wakeup(void)
++{
++ do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
++}
++EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
++
+ void __init rcu_init_nohz(void)
+ {
+ int cpu;
+@@ -2518,8 +2530,9 @@ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
+ return false;
+ }
+
+-static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
++static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ {
++ return false;
+ }
+
+ static void rcu_spawn_cpu_nocb_kthread(int cpu)
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 04a3ce20da671..bbc78794224ac 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3943,6 +3943,22 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
+ trace_sched_util_est_cfs_tp(cfs_rq);
+ }
+
++static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
++ struct task_struct *p)
++{
++ unsigned int enqueued;
++
++ if (!sched_feat(UTIL_EST))
++ return;
++
++ /* Update root cfs_rq's estimated utilization */
++ enqueued = cfs_rq->avg.util_est.enqueued;
++ enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
++ WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
++
++ trace_sched_util_est_cfs_tp(cfs_rq);
++}
++
+ /*
+ * Check if a (signed) value is within a specified (unsigned) margin,
+ * based on the observation that:
+@@ -3956,23 +3972,16 @@ static inline bool within_margin(int value, int margin)
+ return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
+ }
+
+-static void
+-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
++static inline void util_est_update(struct cfs_rq *cfs_rq,
++ struct task_struct *p,
++ bool task_sleep)
+ {
+ long last_ewma_diff;
+ struct util_est ue;
+- int cpu;
+
+ if (!sched_feat(UTIL_EST))
+ return;
+
+- /* Update root cfs_rq's estimated utilization */
+- ue.enqueued = cfs_rq->avg.util_est.enqueued;
+- ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
+- WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
+-
+- trace_sched_util_est_cfs_tp(cfs_rq);
+-
+ /*
+ * Skip update of task's estimated utilization when the task has not
+ * yet completed an activation, e.g. being migrated.
+@@ -4012,8 +4021,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+ * To avoid overestimation of actual task utilization, skip updates if
+ * we cannot grant there is idle time in this CPU.
+ */
+- cpu = cpu_of(rq_of(cfs_rq));
+- if (task_util(p) > capacity_orig_of(cpu))
++ if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
+ return;
+
+ /*
+@@ -4052,7 +4060,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ if (!static_branch_unlikely(&sched_asym_cpucapacity))
+ return;
+
+- if (!p) {
++ if (!p || p->nr_cpus_allowed == 1) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+@@ -4096,8 +4104,11 @@ static inline void
+ util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+ static inline void
+-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
+- bool task_sleep) {}
++util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
++
++static inline void
++util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
++ bool task_sleep) {}
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+
+ #endif /* CONFIG_SMP */
+@@ -5609,6 +5620,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ int idle_h_nr_running = task_has_idle_policy(p);
+ bool was_sched_idle = sched_idle_rq(rq);
+
++ util_est_dequeue(&rq->cfs, p);
++
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ dequeue_entity(cfs_rq, se, flags);
+@@ -5659,7 +5672,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ rq->next_balance = jiffies;
+
+ dequeue_throttle:
+- util_est_dequeue(&rq->cfs, p, task_sleep);
++ util_est_update(&rq->cfs, p, task_sleep);
+ hrtick_update(rq);
+ }
+
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 305727ea06772..7199e6f23789e 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -285,6 +285,7 @@ static void do_idle(void)
+ }
+
+ arch_cpu_idle_enter();
++ rcu_nocb_flush_deferred_wakeup();
+
+ /*
+ * In poll mode we reenable interrupts and spin. Also if we
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 952dc1c902295..63b40d12896bd 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -1284,6 +1284,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
+ const bool recheck_after_trace)
+ {
+ BUG();
++
++ return -1;
+ }
+ #endif
+
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 1b6070bf97bb0..aeb0adfa06063 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -14,6 +14,7 @@
+ #include <linux/export.h>
+ #include <linux/percpu.h>