author      Mike Pagano <mpagano@gentoo.org>    2024-03-01 08:06:46 -0500
committer   Mike Pagano <mpagano@gentoo.org>    2024-03-01 08:06:46 -0500
commit      6b2028bf1d3e30c36d52b0f17634981b44cca073 (patch)
tree        a2968245784a1d6af8e01940ae229bd7b2cf701c
parent      Update cpu optimization patch (diff)
download    linux-patches-6b2028bf1d3e30c36d52b0f17634981b44cca073.tar.gz
            linux-patches-6b2028bf1d3e30c36d52b0f17634981b44cca073.tar.bz2
            linux-patches-6b2028bf1d3e30c36d52b0f17634981b44cca073.zip
Linux patch 6.6.19 (6.6-25)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--   0000_README             |     4 +
-rw-r--r--   1018_linux-6.6.19.patch | 13387 +
2 files changed, 13391 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index ff88075d..fc6a3e20 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch: 1017_linux-6.6.18.patch
From: https://www.kernel.org
Desc: Linux 6.6.18
+Patch: 1018_linux-6.6.19.patch
+From: https://www.kernel.org
+Desc: Linux 6.6.19
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
Desc: Enable link security restrictions by default.
diff --git a/1018_linux-6.6.19.patch b/1018_linux-6.6.19.patch
new file mode 100644
index 00000000..98f024f0
--- /dev/null
+++ b/1018_linux-6.6.19.patch
@@ -0,0 +1,13387 @@
+diff --git a/Documentation/conf.py b/Documentation/conf.py
+index d4fdf6a3875a8..dfc19c915d5c4 100644
+--- a/Documentation/conf.py
++++ b/Documentation/conf.py
+@@ -383,6 +383,12 @@ latex_elements = {
+ verbatimhintsturnover=false,
+ ''',
+
++ #
++ # Some of our authors are fond of deep nesting; tell latex to
++ # cope.
++ #
++ 'maxlistdepth': '10',
++
+ # For CJK One-half spacing, need to be in front of hyperref
+ 'extrapackages': r'\usepackage{setspace}',
+
+diff --git a/Makefile b/Makefile
+index b7198af9e59b4..6130b6bd8d6c5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
+index e899de681f475..5be0e8fd2633c 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
+@@ -45,8 +45,8 @@ spi1_gpio: spi1-gpio {
+ num-chipselects = <1>;
+ cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
+
+- tpmdev@0 {
+- compatible = "tcg,tpm_tis-spi";
++ tpm@0 {
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
+index a677c827e758f..5a8169bbda879 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
+@@ -80,8 +80,8 @@ spi_gpio: spi {
+ gpio-miso = <&gpio ASPEED_GPIO(R, 5) GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+
+- tpmdev@0 {
+- compatible = "tcg,tpm_tis-spi";
++ tpm@0 {
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+index 3f6010ef2b86f..213023bc5aec4 100644
+--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
++++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+@@ -456,7 +456,7 @@ &i2c1 {
+ status = "okay";
+
+ tpm: tpm@2e {
+- compatible = "tcg,tpm-tis-i2c";
++ compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
+index 31590d3186a2e..00e5887c926f1 100644
+--- a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
++++ b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
+@@ -35,8 +35,8 @@ spi_gpio: spi {
+ gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
+ gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
+
+- tpmdev@0 {
+- compatible = "tcg,tpm_tis-spi";
++ tpm@0 {
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ spi-max-frequency = <33000000>;
+ reg = <0>;
+ };
+diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
+index ea627638e40cf..7dd1fe5a2fb76 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
++++ b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
+@@ -121,7 +121,7 @@ &ecspi1 {
+ tpm_tis: tpm@1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpm>;
+- compatible = "tcg,tpm_tis-spi";
++ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <1>;
+ spi-max-frequency = <20000000>;
+ interrupt-parent = <&gpio5>;
+diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
+index 3a723843d5626..9984b343cdf0c 100644
+--- a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
++++ b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
+@@ -130,7 +130,7 @@ &ecspi4 {
+ * TCG specification - Section 6.4.1 Clocking:
+ * TPM shall support a SPI clock frequency range of 10-24 MHz.
+ */
+- st33htph: tpm-tis@0 {
++ st33htph: tpm@0 {
+ compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <24000000>;
+diff --git a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
+index b8730aa52ce6f..a59331aa58e55 100644
+--- a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
++++ b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
+@@ -217,7 +217,7 @@ &spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+
+- tpm_spi_tis@0 {
++ tpm@0 {
+ compatible = "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
+index 71b1139764204..8b1ec60a9a467 100644
+--- a/arch/arm/mach-ep93xx/core.c
++++ b/arch/arm/mach-ep93xx/core.c
+@@ -339,6 +339,7 @@ static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("G", 0, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
++ { }
+ },
+ };
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+index 13674dc64be9d..116bf9738a8a4 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+@@ -484,7 +484,7 @@ &uart3 { /* A53 Debug */
+ &uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+- status = "okay";
++ status = "disabled";
+ };
+
+ &usb3_phy0 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+index 4240e20d38ac3..258e90cc16ff3 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+@@ -168,6 +168,13 @@ reg_vcc_12v0: regulator-12v0 {
+ enable-active-high;
+ };
+
++ reg_vcc_1v8: regulator-1v8 {
++ compatible = "regulator-fixed";
++ regulator-name = "VCC_1V8";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ };
++
+ reg_vcc_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+@@ -464,7 +471,7 @@ tlv320aic3x04: audio-codec@18 {
+ clock-names = "mclk";
+ clocks = <&audio_blk_ctrl IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1>;
+ reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
+- iov-supply = <&reg_vcc_3v3>;
++ iov-supply = <&reg_vcc_1v8>;
+ ldoin-supply = <&reg_vcc_3v3>;
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index 42ce78beb4134..20955556b624d 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -632,6 +632,7 @@ spi0: spi@ff1d0000 {
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 12>, <&dmac 13>;
+ dma-names = "tx", "rx";
++ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>;
+ #address-cells = <1>;
+@@ -647,6 +648,7 @@ spi1: spi@ff1d8000 {
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 14>, <&dmac 15>;
+ dma-names = "tx", "rx";
++ num-cs = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>;
+ #address-cells = <1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+index d1503a4b233a3..9299fa7e3e215 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+@@ -163,13 +163,13 @@ &gpio0 {
+
+ &gpio1 {
+ gpio-line-names = /* GPIO1 A0-A7 */
+- "HEADER_27_3v3", "HEADER_28_3v3", "", "",
++ "HEADER_27_3v3", "", "", "",
+ "HEADER_29_1v8", "", "HEADER_7_1v8", "",
+ /* GPIO1 B0-B7 */
+ "", "HEADER_31_1v8", "HEADER_33_1v8", "",
+ "HEADER_11_1v8", "HEADER_13_1v8", "", "",
+ /* GPIO1 C0-C7 */
+- "", "", "", "",
++ "", "HEADER_28_3v3", "", "",
+ "", "", "", "",
+ /* GPIO1 D0-D7 */
+ "", "", "", "",
+@@ -193,11 +193,11 @@ &gpio3 {
+
+ &gpio4 {
+ gpio-line-names = /* GPIO4 A0-A7 */
+- "", "", "HEADER_37_3v3", "HEADER_32_3v3",
+- "HEADER_36_3v3", "", "HEADER_35_3v3", "HEADER_38_3v3",
++ "", "", "HEADER_37_3v3", "HEADER_8_3v3",
++ "HEADER_10_3v3", "", "HEADER_32_3v3", "HEADER_35_3v3",
+ /* GPIO4 B0-B7 */
+ "", "", "", "HEADER_40_3v3",
+- "HEADER_8_3v3", "HEADER_10_3v3", "", "",
++ "HEADER_38_3v3", "HEADER_36_3v3", "", "",
+ /* GPIO4 C0-C7 */
+ "", "", "", "",
+ "", "", "", "",
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index 8df46f186c64b..b54506d707380 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -360,6 +360,7 @@ extern void sme_alloc(struct task_struct *task, bool flush);
+ extern unsigned int sme_get_vl(void);
+ extern int sme_set_current_vl(unsigned long arg);
+ extern int sme_get_current_vl(void);
++extern void sme_suspend_exit(void);
+
+ /*
+ * Return how many bytes of memory are required to store the full SME
+@@ -395,6 +396,7 @@ static inline int sme_max_vl(void) { return 0; }
+ static inline int sme_max_virtualisable_vl(void) { return 0; }
+ static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
+ static inline int sme_get_current_vl(void) { return -EINVAL; }
++static inline void sme_suspend_exit(void) { }
+
+ static inline size_t sme_state_size(struct task_struct const *task)
+ {
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 1e1e0511c0081..5cdfcc9e3e54b 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1406,6 +1406,22 @@ void __init sme_setup(void)
+ get_sme_default_vl());
+ }
+
++void sme_suspend_exit(void)
++{
++ u64 smcr = 0;
++
++ if (!system_supports_sme())
++ return;
++
++ if (system_supports_fa64())
++ smcr |= SMCR_ELx_FA64;
++ if (system_supports_sme2())
++ smcr |= SMCR_ELx_EZT0;
++
++ write_sysreg_s(smcr, SYS_SMCR_EL1);
++ write_sysreg_s(0, SYS_SMPRI_EL1);
++}
++
+ #endif /* CONFIG_ARM64_SME */
+
+ static void sve_init_regs(void)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 0fbdf5fe64d8d..045af2bfd656a 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -12,6 +12,7 @@
+ #include <asm/daifflags.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/exec.h>
++#include <asm/fpsimd.h>
+ #include <asm/mte.h>
+ #include <asm/memory.h>
+ #include <asm/mmu_context.h>
+@@ -80,6 +81,8 @@ void notrace __cpu_suspend_exit(void)
+ */
+ spectre_v4_enable_mitigation(NULL);
+
++ sme_suspend_exit();
++
+ /* Restore additional feature-specific configuration */
+ ptrauth_suspend_exit();
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index c420723548d8d..4f9084ba7949c 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -462,6 +462,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
+ }
+
+ irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
++ if (!irq)
++ continue;
++
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ irq->pending_latch = pendmask & (1U << bit_nr);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+@@ -1427,6 +1430,8 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
+
+ for (i = 0; i < irq_count; i++) {
+ irq = vgic_get_irq(kvm, NULL, intids[i]);
++ if (!irq)
++ continue;
+
+ update_affinity(irq, vcpu2);
+
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index e14396a2ddcbf..9fd8644a9a4c6 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -11,6 +11,7 @@ config LOONGARCH
+ select ARCH_DISABLE_KASAN_INLINE
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
++ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_CPU_FINALIZE_INIT
+ select ARCH_HAS_FORTIFY_SOURCE
+@@ -97,6 +98,7 @@ config LOONGARCH
+ select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KGDB if PERF_EVENTS
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
++ select HAVE_ARCH_SECCOMP
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+@@ -603,23 +605,6 @@ config RANDOMIZE_BASE_MAX_OFFSET
+
+ This is limited by the size of the lower address memory, 256MB.
+
+-config SECCOMP
+- bool "Enable seccomp to safely compute untrusted bytecode"
+- depends on PROC_FS
+- default y
+- help
+- This kernel feature is useful for number crunching applications
+- that may need to compute untrusted bytecode during their
+- execution. By using pipes or other transports made available to
+- the process as file descriptors supporting the read/write
+- syscalls, it's possible to isolate those applications in
+- their own address space using seccomp. Once seccomp is
+- enabled via /proc/<pid>/seccomp, it cannot be disabled
+- and the task is only allowed to execute a few safe syscalls
+- defined by each seccomp mode.
+-
+- If unsure, say Y. Only embedded should say N here.
+-
+ endmenu
+
+ config ARCH_SELECT_MEMORY_MODEL
+@@ -638,10 +623,6 @@ config ARCH_SPARSEMEM_ENABLE
+ or have huge holes in the physical address space for other reasons.
+ See <file:Documentation/mm/numa.rst> for more.
+
+-config ARCH_ENABLE_THP_MIGRATION
+- def_bool y
+- depends on TRANSPARENT_HUGEPAGE
+-
+ config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
+index 8de6c4b83a61a..49e29b29996f0 100644
+--- a/arch/loongarch/include/asm/acpi.h
++++ b/arch/loongarch/include/asm/acpi.h
+@@ -32,8 +32,10 @@ static inline bool acpi_has_cpu_in_madt(void)
+ return true;
+ }
+
++#define MAX_CORE_PIC 256
++
+ extern struct list_head acpi_wakeup_device_list;
+-extern struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
++extern struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
+
+ extern int __init parse_acpi_topology(void);
+
+diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
+index 8e00a754e5489..55d6a48c76a82 100644
+--- a/arch/loongarch/kernel/acpi.c
++++ b/arch/loongarch/kernel/acpi.c
+@@ -29,11 +29,9 @@ int disabled_cpus;
+
+ u64 acpi_saved_sp;
+
+-#define MAX_CORE_PIC 256
+-
+ #define PREFIX "ACPI: "
+
+-struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
++struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
+
+ void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
+ {
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index aed65915e932e..d7409a3e67a53 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -367,6 +367,8 @@ void __init platform_init(void)
+ acpi_gbl_use_default_register_widths = false;
+ acpi_boot_table_init();
+ #endif
++
++ early_init_fdt_scan_reserved_mem();
+ unflatten_and_copy_device_tree();
+
+ #ifdef CONFIG_NUMA
+@@ -400,8 +402,6 @@ static void __init arch_mem_init(char **cmdline_p)
+
+ check_kernel_sections_mem();
+
+- early_init_fdt_scan_reserved_mem();
+-
+ /*
+ * In order to reduce the possibility of kernel panic when failed to
+ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
+diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
+index 2fbf541d7b4f6..378ffa78ffeb4 100644
+--- a/arch/loongarch/kernel/smp.c
++++ b/arch/loongarch/kernel/smp.c
+@@ -88,6 +88,73 @@ void show_ipi_list(struct seq_file *p, int prec)
+ }
+ }
+
++static inline void set_cpu_core_map(int cpu)
++{
++ int i;
++
++ cpumask_set_cpu(cpu, &cpu_core_setup_map);
++
++ for_each_cpu(i, &cpu_core_setup_map) {
++ if (cpu_data[cpu].package == cpu_data[i].package) {
++ cpumask_set_cpu(i, &cpu_core_map[cpu]);
++ cpumask_set_cpu(cpu, &cpu_core_map[i]);
++ }
++ }
++}
++
++static inline void set_cpu_sibling_map(int cpu)
++{
++ int i;
++
++ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
++
++ for_each_cpu(i, &cpu_sibling_setup_map) {
++ if (cpus_are_siblings(cpu, i)) {
++ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
++ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
++ }
++ }
++}
++
++static inline void clear_cpu_sibling_map(int cpu)
++{
++ int i;
++
++ for_each_cpu(i, &cpu_sibling_setup_map) {
++ if (cpus_are_siblings(cpu, i)) {
++ cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
++ cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
++ }
++ }
++
++ cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
++}
++
++/*
++ * Calculate a new cpu_foreign_map mask whenever a
++ * new cpu appears or disappears.
++ */
++void calculate_cpu_foreign_map(void)
++{
++ int i, k, core_present;
++ cpumask_t temp_foreign_map;
++
++ /* Re-calculate the mask */
++ cpumask_clear(&temp_foreign_map);
++ for_each_online_cpu(i) {
++ core_present = 0;
++ for_each_cpu(k, &temp_foreign_map)
++ if (cpus_are_siblings(i, k))
++ core_present = 1;
++ if (!core_present)
++ cpumask_set_cpu(i, &temp_foreign_map);
++ }
++
++ for_each_online_cpu(i)
++ cpumask_andnot(&cpu_foreign_map[i],
++ &temp_foreign_map, &cpu_sibling_map[i]);
++}
++
+ /* Send mailbox buffer via Mail_Send */
+ static void csr_mail_send(uint64_t data, int cpu, int mailbox)
+ {
+@@ -300,6 +367,7 @@ int loongson_cpu_disable(void)
+ numa_remove_cpu(cpu);
+ #endif
+ set_cpu_online(cpu, false);
++ clear_cpu_sibling_map(cpu);
+ calculate_cpu_foreign_map();
+ local_irq_save(flags);
+ irq_migrate_all_off_this_cpu();
+@@ -334,6 +402,7 @@ void __noreturn arch_cpu_idle_dead(void)
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
++ local_irq_disable();
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+@@ -376,59 +445,6 @@ static int __init ipi_pm_init(void)
+ core_initcall(ipi_pm_init);
+ #endif
+
+-static inline void set_cpu_sibling_map(int cpu)
+-{
+- int i;
+-
+- cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+-
+- for_each_cpu(i, &cpu_sibling_setup_map) {
+- if (cpus_are_siblings(cpu, i)) {
+- cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+- cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+- }
+- }
+-}
+-
+-static inline void set_cpu_core_map(int cpu)
+-{
+- int i;
+-
+- cpumask_set_cpu(cpu, &cpu_core_setup_map);
+-
+- for_each_cpu(i, &cpu_core_setup_map) {
+- if (cpu_data[cpu].package == cpu_data[i].package) {
+- cpumask_set_cpu(i, &cpu_core_map[cpu]);
+- cpumask_set_cpu(cpu, &cpu_core_map[i]);
+- }
+- }
+-}
+-
+-/*
+- * Calculate a new cpu_foreign_map mask whenever a
+- * new cpu appears or disappears.
+- */
+-void calculate_cpu_foreign_map(void)
+-{
+- int i, k, core_present;
+- cpumask_t temp_foreign_map;
+-
+- /* Re-calculate the mask */
+- cpumask_clear(&temp_foreign_map);
+- for_each_online_cpu(i) {
+- core_present = 0;
+- for_each_cpu(k, &temp_foreign_map)
+- if (cpus_are_siblings(i, k))
+- core_present = 1;
+- if (!core_present)
+- cpumask_set_cpu(i, &temp_foreign_map);
+- }
+-
+- for_each_online_cpu(i)
+- cpumask_andnot(&cpu_foreign_map[i],
+- &temp_foreign_map, &cpu_sibling_map[i]);
+-}
+-
+ /* Preload SMP state for boot cpu */
+ void smp_prepare_boot_cpu(void)
+ {
+diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
+index 5c97d14633282..4305d99b33130 100644
+--- a/arch/loongarch/vdso/Makefile
++++ b/arch/loongarch/vdso/Makefile
+@@ -2,6 +2,7 @@
+ # Objects to go into the VDSO.
+
+ KASAN_SANITIZE := n
++UBSAN_SANITIZE := n
+ KCOV_INSTRUMENT := n
+
+ # Include the generic Makefile to check the built vdso.
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 246c6a6b02614..5b778995d4483 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];
+
+ void reserve_exception_space(phys_addr_t addr, unsigned long size)
+ {
+- memblock_reserve(addr, size);
++ /*
++ * reserve exception space on CPUs other than CPU0
++ * is too late, since memblock is unavailable when APs
++ * up
++ */
++ if (smp_processor_id() == 0)
++ memblock_reserve(addr, size);
+ }
+
+ void __init *set_except_vector(int n, void *addr)
+diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
+index 1fc89fa2c2d21..e37c48770b585 100644
+--- a/arch/parisc/kernel/processor.c
++++ b/arch/parisc/kernel/processor.c
+@@ -172,7 +172,6 @@ static int __init processor_probe(struct parisc_device *dev)
+ p->cpu_num = cpu_info.cpu_num;
+ p->cpu_loc = cpu_info.cpu_loc;
+
+- set_cpu_possible(cpuid, true);
+ store_cpu_topology(cpuid);
+
+ #ifdef CONFIG_SMP
+@@ -474,13 +473,6 @@ static struct parisc_driver cpu_driver __refdata = {
+ */
+ void __init processor_init(void)
+ {
+- unsigned int cpu;
+-
+ reset_cpu_topology();
+-
+- /* reset possible mask. We will mark those which are possible. */
+- for_each_possible_cpu(cpu)
+- set_cpu_possible(cpu, false);
+-
+ register_parisc_driver(&cpu_driver);
+ }
+diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
+index 27ae40a443b80..f7e0fee5ee55a 100644
+--- a/arch/parisc/kernel/unwind.c
++++ b/arch/parisc/kernel/unwind.c
+@@ -228,10 +228,8 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
+ #ifdef CONFIG_IRQSTACKS
+ extern void * const _call_on_stack;
+ #endif /* CONFIG_IRQSTACKS */
+- void *ptr;
+
+- ptr = dereference_kernel_function_descriptor(&handle_interruption);
+- if (pc_is_kernel_fn(pc, ptr)) {
++ if (pc_is_kernel_fn(pc, handle_interruption)) {
+ struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
+ dbg("Unwinding through handle_interruption()\n");
+ info->prev_sp = regs->gr[30];
+@@ -239,13 +237,13 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
+ return 1;
+ }
+
+- if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
+- pc_is_kernel_fn(pc, syscall_exit)) {
++ if (pc == (unsigned long)&ret_from_kernel_thread ||
++ pc == (unsigned long)&syscall_exit) {
+ info->prev_sp = info->prev_ip = 0;
+ return 1;
+ }
+
+- if (pc_is_kernel_fn(pc, intr_return)) {
++ if (pc == (unsigned long)&intr_return) {
+ struct pt_regs *regs;
+
+ dbg("Found intr_return()\n");
+@@ -257,14 +255,14 @@ static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int
+ }
+
+ if (pc_is_kernel_fn(pc, _switch_to) ||
+- pc_is_kernel_fn(pc, _switch_to_ret)) {
++ pc == (unsigned long)&_switch_to_ret) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ return 1;
+ }
+
+ #ifdef CONFIG_IRQSTACKS
+- if (pc_is_kernel_fn(pc, _call_on_stack)) {
++ if (pc == (unsigned long)&_call_on_stack) {
+ info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
+ info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
+ return 1;
+diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
+index d9fcff5750271..2689e7139b9ea 100644
+--- a/arch/powerpc/include/asm/ppc-pci.h
++++ b/arch/powerpc/include/asm/ppc-pci.h
+@@ -30,6 +30,16 @@ void *pci_traverse_device_nodes(struct device_node *start,
+ void *data);
+ extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+
++#if defined(CONFIG_IOMMU_API) && (defined(CONFIG_PPC_PSERIES) || \
++ defined(CONFIG_PPC_POWERNV))
++extern void ppc_iommu_register_device(struct pci_controller *phb);
++extern void ppc_iommu_unregister_device(struct pci_controller *phb);
++#else
++static inline void ppc_iommu_register_device(struct pci_controller *phb) { }
++static inline void ppc_iommu_unregister_device(struct pci_controller *phb) { }
++#endif
++
++
+ /* From rtas_pci.h */
+ extern void init_pci_config_tokens (void);
+ extern unsigned long get_phb_buid (struct device_node *);
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 14251bc5219eb..efaca0c6eff9d 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -1344,7 +1344,7 @@ static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
+ struct pci_controller *hose;
+
+ if (!dev_is_pci(dev))
+- return ERR_PTR(-EPERM);
++ return ERR_PTR(-ENODEV);
+
+ pdev = to_pci_dev(dev);
+ hose = pdev->bus->sysdata;
+@@ -1393,6 +1393,21 @@ static const struct attribute_group *spapr_tce_iommu_groups[] = {
+ NULL,
+ };
+
++void ppc_iommu_register_device(struct pci_controller *phb)
++{
++ iommu_device_sysfs_add(&phb->iommu, phb->parent,
++ spapr_tce_iommu_groups, "iommu-phb%04x",
++ phb->global_number);
++ iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
++ phb->parent);
++}
++
++void ppc_iommu_unregister_device(struct pci_controller *phb)
++{
++ iommu_device_unregister(&phb->iommu);
++ iommu_device_sysfs_remove(&phb->iommu);
++}
++
+ /*
+ * This registers IOMMU devices of PHBs. This needs to happen
+ * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
+@@ -1403,11 +1418,7 @@ static int __init spapr_tce_setup_phb_iommus_initcall(void)
+ struct pci_controller *hose;
+
+ list_for_each_entry(hose, &hose_list, list_node) {
+- iommu_device_sysfs_add(&hose->iommu, hose->parent,
+- spapr_tce_iommu_groups, "iommu-phb%04x",
+- hose->global_number);
+- iommu_device_register(&hose->iommu, &spapr_tce_iommu_ops,
+- hose->parent);
++ ppc_iommu_register_device(hose);
+ }
+ return 0;
+ }
+diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
+index 4ba8245681192..4448386268d99 100644
+--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
+@@ -35,6 +35,8 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
+
+ pseries_msi_allocate_domains(phb);
+
++ ppc_iommu_register_device(phb);
++
+ /* Create EEH devices for the PHB */
+ eeh_phb_pe_create(phb);
+
+@@ -76,6 +78,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
+ }
+ }
+
++ ppc_iommu_unregister_device(phb);
++
+ pseries_msi_free_domains(phb);
+
+ /* Keep a reference so phb isn't freed yet */
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index d34d5813d0066..777362cb4ea80 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -241,7 +241,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ /* combine single writes by using store-block insn */
+ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+ {
+- zpci_memcpy_toio(to, from, count);
++ zpci_memcpy_toio(to, from, count * 8);
+ }
+
+ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
+index 7417345c6639a..60da865c079a2 100644
+--- a/arch/sparc/Makefile
++++ b/arch/sparc/Makefile
+@@ -60,7 +60,7 @@ libs-y += arch/sparc/prom/
+ libs-y += arch/sparc/lib/
+
+ drivers-$(CONFIG_PM) += arch/sparc/power/
+-drivers-$(CONFIG_FB) += arch/sparc/video/
++drivers-$(CONFIG_FB_CORE) += arch/sparc/video/
+
+ boot := arch/sparc/boot
+
+diff --git a/arch/sparc/video/Makefile b/arch/sparc/video/Makefile
+index 6baddbd58e4db..d4d83f1702c61 100644
+--- a/arch/sparc/video/Makefile
++++ b/arch/sparc/video/Makefile
+@@ -1,3 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+
+-obj-$(CONFIG_FB) += fbdev.o
++obj-$(CONFIG_FB_CORE) += fbdev.o
+diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
+index bfb7bcb362bcf..718c00367f9a5 100644
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -6,6 +6,9 @@
+ #include <linux/linkage.h>
+ #include <asm/export.h>
+ #include <asm/msr-index.h>
++#include <asm/unwind_hints.h>
++#include <asm/segment.h>
++#include <asm/cache.h>
+
+ .pushsection .noinstr.text, "ax"
+
+@@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
+ EXPORT_SYMBOL_GPL(entry_ibpb);
+
+ .popsection
++
++/*
++ * Define the VERW operand that is disguised as entry code so that
++ * it can be referenced with KPTI enabled. This ensure VERW can be
++ * used late in exit-to-user path after page tables are switched.
++ */
++.pushsection .entry.text, "ax"
++
++.align L1_CACHE_BYTES, 0xcc
++SYM_CODE_START_NOALIGN(mds_verw_sel)
++ UNWIND_HINT_UNDEFINED
++ ANNOTATE_NOENDBR
++ .word __KERNEL_DS
++.align L1_CACHE_BYTES, 0xcc
++SYM_CODE_END(mds_verw_sel);
++/* For KVM */
++EXPORT_SYMBOL_GPL(mds_verw_sel);
++
++.popsection
++
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 0091f10083143..e7b0554be04fa 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -97,7 +97,7 @@
+ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+ #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
+-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) "" LFENCE synchronizes RDTSC */
++#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
+ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+ #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 0396458c201f8..a3db9647428bc 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -329,6 +329,17 @@
+ #endif
+ .endm
+
++/*
++ * Macro to execute VERW instruction that mitigate transient data sampling
++ * attacks such as MDS. On affected systems a microcode update overloaded VERW
++ * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
++ *
++ * Note: Only the memory operand variant of VERW clears the CPU buffers.
++ */
++.macro CLEAR_CPU_BUFFERS
++ ALTERNATIVE "", __stringify(verw _ASM_RIP(mds_verw_sel)), X86_FEATURE_CLEAR_CPU_BUF
++.endm
++
+ #else /* __ASSEMBLY__ */
+
+ #define ANNOTATE_RETPOLINE_SAFE \
+@@ -545,6 +556,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+
++extern u16 mds_verw_sel;
++
+ #include <asm/segment.h>
+
+ /**
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index aa39d678fe81d..dae5c952735c7 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -961,7 +961,7 @@ static int __init cmp_memblk(const void *a, const void *b)
+ const struct numa_memblk *ma = *(const struct numa_memblk **)a;
+ const struct numa_memblk *mb = *(const struct numa_memblk **)b;
+
+- return ma->start - mb->start;
++ return (ma->start > mb->start) - (ma->start < mb->start);
+ }
+
+ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+@@ -971,14 +971,12 @@ static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;
+ * @start: address to begin fill
+ * @end: address to end fill
+ *
+- * Find and extend numa_meminfo memblks to cover the @start-@end
+- * physical address range, such that the first memblk includes
+- * @start, the last memblk includes @end, and any gaps in between
+- * are filled.
++ * Find and extend numa_meminfo memblks to cover the physical
++ * address range @start-@end
+ *
+ * RETURNS:
+ * 0 : Success
+- * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
++ * NUMA_NO_MEMBLK : No memblks exist in address range @start-@end
+ */
+
+ int __init numa_fill_memblks(u64 start, u64 end)
+@@ -990,17 +988,14 @@ int __init numa_fill_memblks(u64 start, u64 end)
+
+ /*
+ * Create a list of pointers to numa_meminfo memblks that
+- * overlap start, end. Exclude (start == bi->end) since
+- * end addresses in both a CFMWS range and a memblk range
+- * are exclusive.
+- *
+- * This list of pointers is used to make in-place changes
+- * that fill out the numa_meminfo memblks.
++ * overlap start, end. The list is used to make in-place
++ * changes that fill out the numa_meminfo memblks.
+ */
+ for (int i = 0; i < mi->nr_blks; i++) {
+ struct numa_memblk *bi = &mi->blk[i];
+
+- if (start < bi->end && end >= bi->start) {
++ if (memblock_addrs_overlap(start, end - start, bi->start,
++ bi->end - bi->start)) {
+ blk[count] = &mi->blk[i];
+ count++;
+ }
+diff --git a/block/blk-map.c b/block/blk-map.c
+index 8584babf3ea0c..71210cdb34426 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -205,12 +205,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
+ /*
+ * success
+ */
+- if ((iov_iter_rw(iter) == WRITE &&
+- (!map_data || !map_data->null_mapped)) ||
+- (map_data && map_data->from_user)) {
++ if (iov_iter_rw(iter) == WRITE &&
++ (!map_data || !map_data->null_mapped)) {
+ ret = bio_copy_from_iter(bio, iter);
+ if (ret)
+ goto cleanup;
++ } else if (map_data && map_data->from_user) {
++ struct iov_iter iter2 = *iter;
++
++ /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
++ iter2.data_source = ITER_SOURCE;
++ ret = bio_copy_from_iter(bio, &iter2);
++ if (ret)
++ goto cleanup;
+ } else {
+ if (bmd->is_our_pages)
+ zero_fill_bio(bio);
+diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
+index 7e9359611d69c..8fb70e3c7b9ca 100644
+--- a/drivers/accel/ivpu/ivpu_drv.c
++++ b/drivers/accel/ivpu/ivpu_drv.c
+@@ -467,9 +467,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
+ /* Clear any pending errors */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
+
+- /* VPU 37XX does not require 10m D3hot delay */
+- if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
+- pdev->d3hot_delay = 0;
++ /* NPU does not require 10m D3hot delay */
++ pdev->d3hot_delay = 0;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
+index ddf03498fd4c1..c0de7c0c991f5 100644
+--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
+@@ -562,7 +562,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+ u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
++ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
+index 03600a7a5aca8..cced6278c4f89 100644
+--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
++++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
+@@ -24,7 +24,7 @@
+ #define SKU_HW_ID_SHIFT 16u
+ #define SKU_HW_ID_MASK 0xffff0000u
+
+-#define PLL_CONFIG_DEFAULT 0x1
++#define PLL_CONFIG_DEFAULT 0x0
+ #define PLL_CDYN_DEFAULT 0x80
+ #define PLL_EPP_DEFAULT 0x80
+ #define PLL_REF_CLK_FREQ (50 * 1000000)
+@@ -523,7 +523,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+ u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
+- val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
++ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
+
+ REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
+@@ -697,7 +697,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+ {
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 tile_disable;
+- u32 tile_enable;
+ u32 fuse;
+
+ fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
+@@ -718,10 +717,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
+ else
+ ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
+
+- tile_enable = (~tile_disable) & TILE_MAX_MASK;
+-
+- hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
+- hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
+ hw->tile_fuse = tile_disable;
+ hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
+
+diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
+index baefaf7bb3cbb..d04a28e052485 100644
+--- a/drivers/accel/ivpu/ivpu_mmu.c
++++ b/drivers/accel/ivpu/ivpu_mmu.c
+@@ -491,7 +491,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
+ mmu->cmdq.cons = 0;
+
+ memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
+- clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
+ mmu->evtq.prod = 0;
+ mmu->evtq.cons = 0;
+
+@@ -805,8 +804,6 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
+ if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ return NULL;
+
+- clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
+-
+ evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
+ REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 08745e7db8201..90d33c519f4c6 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -48,6 +48,7 @@ enum {
+ enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
++ board_ahci_43bit_dma,
+ board_ahci_ign_iferr,
+ board_ahci_low_power,
+ board_ahci_no_debounce_delay,
+@@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
++ [board_ahci_43bit_dma] = {
++ AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
+ [board_ahci_ign_iferr] = {
+ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
+ .flags = AHCI_FLAG_COMMON,
+@@ -596,14 +604,19 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
+
+- /* Asmedia */
+- { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
+- { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
+- { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+- { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
+- { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
+- { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
++ /* ASMedia */
++ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
++ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
++ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
++ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
++ { PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
++ { PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
++ { PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */
++ { PCI_VDEVICE(ASMEDIA, 0x1165), board_ahci }, /* ASM1165 */
++ { PCI_VDEVICE(ASMEDIA, 0x1166), board_ahci }, /* ASM1166 */
+
+ /*
+ * Samsung SSDs found on some macbooks. NCQ times out if MSI is
+@@ -657,6 +670,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
+ {
++ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
++ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
++ hpriv->saved_port_map = 0x3f;
++ }
++
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
+ dev_info(&pdev->dev, "JMB361 has only one port\n");
+ hpriv->saved_port_map = 1;
+@@ -943,11 +961,20 @@ static int ahci_pci_device_resume(struct device *dev)
+
+ #endif /* CONFIG_PM */
+
+-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
++static int ahci_configure_dma_masks(struct pci_dev *pdev,
++ struct ahci_host_priv *hpriv)
+ {
+- const int dma_bits = using_dac ? 64 : 32;
++ int dma_bits;
+ int rc;
+
++ if (hpriv->cap & HOST_CAP_64) {
++ dma_bits = 64;
++ if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
++ dma_bits = 43;
++ } else {
++ dma_bits = 32;
++ }
++
+ /*
+ * If the device fixup already set the dma_mask to some non-standard
+ * value, don't extend it here. This happens on STA2X11, for example.
+@@ -1920,7 +1947,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ ahci_gtf_filter_workaround(host);
+
+ /* initialize adapter */
+- rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
++ rc = ahci_configure_dma_masks(pdev, hpriv);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 4bae95b06ae3c..df8f8a1a3a34c 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -247,6 +247,7 @@ enum {
+ AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
+ suspend/resume */
+ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
++ AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
+
+ /* ap->flags bits */
+
+diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
+index 64f7f7d6ba84e..11a2c199a7c24 100644
+--- a/drivers/ata/ahci_ceva.c
++++ b/drivers/ata/ahci_ceva.c
+@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
+ u32 axicc;
+ bool is_cci_enabled;
+ int flags;
+- struct reset_control *rst;
+ };
+
+ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
+@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+ };
+
++static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
++{
++ int rc, i;
++
++ rc = ahci_platform_enable_regulators(hpriv);
++ if (rc)
++ return rc;
++
++ rc = ahci_platform_enable_clks(hpriv);
++ if (rc)
++ goto disable_regulator;
++
++ /* Assert the controller reset */
++ rc = ahci_platform_assert_rsts(hpriv);
++ if (rc)
++ goto disable_clks;
++
++ for (i = 0; i < hpriv->nports; i++) {
++ rc = phy_init(hpriv->phys[i]);
++ if (rc)
++ goto disable_rsts;
++ }
++
++ /* De-assert the controller reset */
++ ahci_platform_deassert_rsts(hpriv);
++
++ for (i = 0; i < hpriv->nports; i++) {
++ rc = phy_power_on(hpriv->phys[i]);
++ if (rc) {
++ phy_exit(hpriv->phys[i]);
++ goto disable_phys;
++ }
++ }
++
++ return 0;
++
++disable_rsts:
++ ahci_platform_deassert_rsts(hpriv);
++
++disable_phys:
++ while (--i >= 0) {
++ phy_power_off(hpriv->phys[i]);
++ phy_exit(hpriv->phys[i]);
++ }
++
++disable_clks:
++ ahci_platform_disable_clks(hpriv);
++
++disable_regulator:
++ ahci_platform_disable_regulators(hpriv);
++
++ return rc;
++}
++
+ static int ceva_ahci_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
+@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ cevapriv->ahci_pdev = pdev;
+-
+- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+- NULL);
+- if (IS_ERR(cevapriv->rst))
+- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
+- "failed to get reset\n");
+-
+ hpriv = ahci_platform_get_resources(pdev, 0);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+- if (!cevapriv->rst) {
+- rc = ahci_platform_enable_resources(hpriv);
+- if (rc)
+- return rc;
+- } else {
+- int i;
++ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
++ NULL);
++ if (IS_ERR(hpriv->rsts))
++ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
++ "failed to get reset\n");
+
+- rc = ahci_platform_enable_clks(hpriv);
+- if (rc)
+- return rc;
+- /* Assert the controller reset */
+- reset_control_assert(cevapriv->rst);
+-
+- for (i = 0; i < hpriv->nports; i++) {
+- rc = phy_init(hpriv->phys[i]);
+- if (rc)
+- return rc;
+- }
+-
+- /* De-assert the controller reset */
+- reset_control_deassert(cevapriv->rst);
+-
+- for (i = 0; i < hpriv->nports; i++) {
+- rc = phy_power_on(hpriv->phys[i]);
+- if (rc) {
+- phy_exit(hpriv->phys[i]);
+- return rc;
+- }
+- }
+- }
++ rc = ceva_ahci_platform_enable_resources(hpriv);
++ if (rc)
++ return rc;
+
+ if (of_property_read_bool(np, "ceva,broken-gen2"))
+ cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
+@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
+ if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
+ (u8 *)&cevapriv->pp2c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
+ (u8 *)&cevapriv->pp2c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /* Read OOB timing value for COMWAKE from device-tree*/
+ if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
+ (u8 *)&cevapriv->pp3c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
+ (u8 *)&cevapriv->pp3c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /* Read phy BURST timing value from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-burst-params",
+ (u8 *)&cevapriv->pp4c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-burst-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-burst-params",
+ (u8 *)&cevapriv->pp4c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-burst-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /* Read phy RETRY interval timing value from device-tree */
+ if (of_property_read_u16_array(np, "ceva,p0-retry-params",
+ (u16 *)&cevapriv->pp5c[0], 2) < 0) {
+ dev_warn(dev, "ceva,p0-retry-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ if (of_property_read_u16_array(np, "ceva,p1-retry-params",
+ (u16 *)&cevapriv->pp5c[1], 2) < 0) {
+ dev_warn(dev, "ceva,p1-retry-params property not defined\n");
+- return -EINVAL;
++ rc = -EINVAL;
++ goto disable_resources;
+ }
+
+ /*
+@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+- rc = ahci_platform_enable_resources(hpriv);
++ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d8cc1e27a125f..f12beeb96629d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2034,6 +2034,10 @@ void ata_dev_power_set_active(struct ata_device *dev)
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
++ /* If the device is already sleeping, do nothing. */
++ if (dev->flags & ATA_DFLAG_SLEEPING)
++ return;
++
+ /*
+ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ * if supported by the device.
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index cf6883756155a..37eff1c974515 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
+ struct gendisk *gd;
+ mempool_t *mp;
+ struct blk_mq_tag_set *set;
++ sector_t ssize;
+ ulong flags;
+ int late = 0;
+ int err;
+@@ -395,7 +396,7 @@ aoeblk_gdalloc(void *vp)
+ gd->minors = AOE_PARTITIONS;
+ gd->fops = &aoe_bdops;
+ gd->private_data = d;
+- set_capacity(gd, d->ssize);
++ ssize = d->ssize;
+ snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
+ d->aoemajor, d->aoeminor);
+
+@@ -404,6 +405,8 @@ aoeblk_gdalloc(void *vp)
+
+ spin_unlock_irqrestore(&d->lock, flags);
+
++ set_capacity(gd, ssize);
++
+ err = device_add_disk(NULL, gd, aoe_attr_groups);
+ if (err)
+ goto out_disk_cleanup;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 225c86c74d4e9..41b2fd7e1b9e5 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -1629,14 +1629,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
+ {
+ struct virtio_blk *vblk = vdev->priv;
+
++ /* Ensure no requests in virtqueues before deleting vqs. */
++ blk_mq_freeze_queue(vblk->disk->queue);
++
+ /* Ensure we don't receive any more interrupts */
+ virtio_reset_device(vdev);
+
+ /* Make sure no work handler is accessing the device. */
+ flush_work(&vblk->config_work);
+
+- blk_mq_quiesce_queue(vblk->disk->queue);
+-
+ vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
+
+@@ -1654,7 +1655,7 @@ static int virtblk_restore(struct virtio_device *vdev)
+
+ virtio_device_ready(vdev);
+
+- blk_mq_unquiesce_queue(vblk->disk->queue);
++ blk_mq_unfreeze_queue(vblk->disk->queue);
+ return 0;
+ }
+ #endif
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index 42c9386a7b423..f9fd1582f150d 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -117,7 +117,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
+ i++;
+ }
+
+- if (i == 0 || i % 4)
++ if (i == 0)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
+diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
+index 57186c58dc849..1d7dd3d2c101c 100644
+--- a/drivers/cache/ax45mp_cache.c
++++ b/drivers/cache/ax45mp_cache.c
+@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
+ unsigned long line_size;
+ unsigned long flags;
+
++ if (unlikely(start == end))
++ return;
++
+ line_size = ax45mp_priv.ax45mp_cache_line_size;
+ start = start & (~(line_size - 1));
++ end = ((end + line_size - 1) & (~(line_size - 1)));
+ local_irq_save(flags);
+ ax45mp_cpu_dcache_wb_range(start, end);
+ local_irq_restore(flags);
+diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+index 2621ff8a93764..de53eddf6796b 100644
+--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
+ }
+
+ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
+- struct virtio_crypto_ctrl_header *header, void *para,
++ struct virtio_crypto_ctrl_header *header,
++ struct virtio_crypto_akcipher_session_para *para,
+ const uint8_t *key, unsigned int keylen)
+ {
+ struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
+@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
+
+ ctrl = &vc_ctrl_req->ctrl;
+ memcpy(&ctrl->header, header, sizeof(ctrl->header));
+- memcpy(&ctrl->u, para, sizeof(ctrl->u));
++ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
+ input = &vc_ctrl_req->input;
+ input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+
+diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
+index 40d055560e52f..4319534558309 100644
+--- a/drivers/cxl/acpi.c
++++ b/drivers/cxl/acpi.c
+@@ -194,31 +194,27 @@ struct cxl_cfmws_context {
+ int id;
+ };
+
+-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+- const unsigned long end)
++static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
++ struct cxl_cfmws_context *ctx)
+ {
+ int target_map[CXL_DECODER_MAX_INTERLEAVE];
+- struct cxl_cfmws_context *ctx = arg;
+ struct cxl_port *root_port = ctx->root_port;
+ struct resource *cxl_res = ctx->cxl_res;
+ struct cxl_cxims_context cxims_ctx;
+ struct cxl_root_decoder *cxlrd;
+ struct device *dev = ctx->dev;
+- struct acpi_cedt_cfmws *cfmws;
+ cxl_calc_hb_fn cxl_calc_hb;
+ struct cxl_decoder *cxld;
+ unsigned int ways, i, ig;
+ struct resource *res;
+ int rc;
+
+- cfmws = (struct acpi_cedt_cfmws *) header;
+-
+ rc = cxl_acpi_cfmws_verify(dev, cfmws);
+ if (rc) {
+ dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+- return 0;
++ return rc;
+ }
+
+ rc = eiw_to_ways(cfmws->interleave_ways, &ways);
+@@ -254,7 +250,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+
+ cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
+ if (IS_ERR(cxlrd))
+- return 0;
++ return PTR_ERR(cxlrd);
+
+ cxld = &cxlrd->cxlsd.cxld;
+ cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+@@ -295,16 +291,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ put_device(&cxld->dev);
+ else
+ rc = cxl_decoder_autoremove(dev, cxld);
+- if (rc) {
+- dev_err(dev, "Failed to add decode range: %pr", res);
+- return rc;
+- }
+- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
+- dev_name(&cxld->dev),
+- phys_to_target_node(cxld->hpa_range.start),
+- cxld->hpa_range.start, cxld->hpa_range.end);
+-
+- return 0;
++ return rc;
+
+ err_insert:
+ kfree(res->name);
+@@ -313,6 +300,29 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ return -ENOMEM;
+ }
+
++static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
++ const unsigned long end)
++{
++ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
++ struct cxl_cfmws_context *ctx = arg;
++ struct device *dev = ctx->dev;
++ int rc;
++
++ rc = __cxl_parse_cfmws(cfmws, ctx);
++ if (rc)
++ dev_err(dev,
++ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
++ cfmws->base_hpa,
++ cfmws->base_hpa + cfmws->window_size - 1, rc);
++ else
++ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
++ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
++ cfmws->base_hpa + cfmws->window_size - 1);
++
++ /* never fail cxl_acpi load for a single window failure */
++ return 0;
++}
++
+ __mock struct acpi_device *to_cxl_host_bridge(struct device *host,
+ struct device *dev)
+ {
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index c7a7887ebdcff..c963cd9e88d16 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -475,9 +475,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ allowed++;
+ }
+
+- if (!allowed) {
+- cxl_set_mem_enable(cxlds, 0);
+- info->mem_enabled = 0;
++ if (!allowed && info->mem_enabled) {
++ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
++ return -ENXIO;
+ }
+
+ /*
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index 3af795635c5ce..356298e4dd22b 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -57,6 +57,8 @@
+
+ #define REG_BUS_WIDTH(ch) (0x8040 + (ch) * 0x200)
+
++#define BUS_WIDTH_WORD_SIZE GENMASK(3, 0)
++#define BUS_WIDTH_FRAME_SIZE GENMASK(7, 4)
+ #define BUS_WIDTH_8BIT 0x00
+ #define BUS_WIDTH_16BIT 0x01
+ #define BUS_WIDTH_32BIT 0x02
+@@ -740,7 +742,8 @@ static int admac_device_config(struct dma_chan *chan,
+ struct admac_data *ad = adchan->host;
+ bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
+ int wordsize = 0;
+- u32 bus_width = 0;
++ u32 bus_width = readl_relaxed(ad->base + REG_BUS_WIDTH(adchan->no)) &
++ ~(BUS_WIDTH_WORD_SIZE | BUS_WIDTH_FRAME_SIZE);
+
+ switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+index 0745d9e7d259b..406f169b09a75 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+@@ -176,7 +176,7 @@ dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+ };
+ struct dentry *regs_dent, *ch_dent;
+ int nr_entries, i;
+- char name[16];
++ char name[32];
+
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
+
+@@ -239,7 +239,7 @@ static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
+ };
+ struct dentry *regs_dent, *ch_dent;
+ int nr_entries, i;
+- char name[16];
++ char name[32];
+
+ regs_dent = debugfs_create_dir(READ_STR, dent);
+
+diff --git a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
+index 520c81978b085..dcdc57fe976c1 100644
+--- a/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
++++ b/drivers/dma/dw-edma/dw-hdma-v0-debugfs.c
+@@ -116,7 +116,7 @@ static void dw_hdma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+ {
+ struct dentry *regs_dent, *ch_dent;
+- char name[16];
++ char name[32];
+ int i;
+
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
+@@ -133,7 +133,7 @@ static void dw_hdma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
+ static void dw_hdma_debugfs_regs_rd(struct dw_edma *dw, struct dentry *dent)
+ {
+ struct dentry *regs_dent, *ch_dent;
+- char name[16];
++ char name[32];
+ int i;
+
+ regs_dent = debugfs_create_dir(READ_STR, dent);
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+index e4c293b76e050..781a3180baf2a 100644
+--- a/drivers/dma/fsl-qdma.c
++++ b/drivers/dma/fsl-qdma.c
+@@ -805,7 +805,7 @@ fsl_qdma_irq_init(struct platform_device *pdev,
+ int i;
+ int cpu;
+ int ret;
+- char irq_name[20];
++ char irq_name[32];
+
+ fsl_qdma->error_irq =
+ platform_get_irq_byname(pdev, "qdma-error");
+diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
+index 9c121a4b33ad8..f97d80343aea4 100644
+--- a/drivers/dma/sh/shdma.h
++++ b/drivers/dma/sh/shdma.h
+@@ -25,7 +25,7 @@ struct sh_dmae_chan {
+ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
+ int xmit_shift; /* log_2(bytes_per_xfer) */
+ void __iomem *base;
+- char dev_id[16]; /* unique name per DMAC of channel */
++ char dev_id[32]; /* unique name per DMAC of channel */
+ int pm_error;
+ dma_addr_t slave_addr;
+ };
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 33d6d931b33bb..155c409d2b434 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ dev_name(dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err_disable_pm;
++ }
++
+ ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+ ecc);
+ if (ret) {
+@@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
+ if (irq > 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ dev_name(dev));
++ if (!irq_name) {
++ ret = -ENOMEM;
++ goto err_disable_pm;
++ }
++
+ ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+ ecc);
+ if (ret) {
+diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
+index 6ac5ff20a2fe2..8aaa7fcb2630d 100644
+--- a/drivers/firewire/core-card.c
++++ b/drivers/firewire/core-card.c
+@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
+ */
+ card->bm_generation = generation;
+
+- if (root_device == NULL) {
++ if (card->gap_count == 0) {
++ /*
++ * If self IDs have inconsistent gap counts, do a
++ * bus reset ASAP. The config rom read might never
++ * complete, so don't wait for it. However, still
++ * send a PHY configuration packet prior to the
++ * bus reset. The PHY configuration packet might
++ * fail, but 1394-2008 8.4.5.2 explicitly permits
++ * it in this case, so it should be safe to try.
++ */
++ new_root_id = local_id;
++ /*
++ * We must always send a bus reset if the gap count
++ * is inconsistent, so bypass the 5-reset limit.
++ */
++ card->bm_retries = 0;
++ } else if (root_device == NULL) {
+ /*
+ * Either link_on is false, or we failed to read the
+ * config rom. In either case, pick another root.
+diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
+index 83f5bb57fa4c4..83092d93f36a6 100644
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+- int md_size = md->num_pages << EFI_PAGE_SHIFT;
++ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
+index ef0820f1a9246..59b0d7197b685 100644
+--- a/drivers/firmware/efi/efi-init.c
++++ b/drivers/firmware/efi/efi-init.c
+@@ -134,15 +134,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+- /*
+- * Special purpose memory is 'soft reserved', which means it
+- * is set aside initially, but can be hotplugged back in or
+- * be assigned to the dax driver after boot.
+- */
+- if (efi_soft_reserve_enabled() &&
+- (md->attribute & EFI_MEMORY_SP))
+- return false;
+-
+ /*
+ * According to the spec, these regions are no longer reserved
+ * after calling ExitBootServices(). However, we can only use
+@@ -187,6 +178,16 @@ static __init void reserve_regions(void)
+ size = npages << PAGE_SHIFT;
+
+ if (is_memory(md)) {
++ /*
++ * Special purpose memory is 'soft reserved', which
++ * means it is set aside initially. Don't add a memblock
++ * for it now so that it can be hotplugged back in or
++ * be assigned to the dax driver after boot.
++ */
++ if (efi_soft_reserve_enabled() &&
++ (md->attribute & EFI_MEMORY_SP))
++ continue;
++
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (!is_usable_memory(md))
+diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
+index ef4c12f0877ba..a0f1569b790da 100644
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
+ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
+ -DEFI_HAVE_STRCMP -fno-builtin -fpic \
+ $(call cc-option,-mno-single-pic-base)
+-cflags-$(CONFIG_RISCV) += -fpic
++cflags-$(CONFIG_RISCV) += -fpic -mno-relax
+ cflags-$(CONFIG_LOONGARCH) += -fpie
+
+ cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
+diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
+index 09525fb5c240e..01f0f90ea4183 100644
+--- a/drivers/firmware/efi/riscv-runtime.c
++++ b/drivers/firmware/efi/riscv-runtime.c
+@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+- int md_size = md->num_pages << EFI_PAGE_SHIFT;
++ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index a79d53bdbe136..85efd686e538d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1009,6 +1009,8 @@ struct amdgpu_device {
+ bool in_s3;
+ bool in_s4;
+ bool in_s0ix;
++	/* indicate whether amdgpu suspend has completed */
++ bool suspend_complete;
+
+ enum pp_mp1_state mp1_state;
+ struct amdgpu_doorbell_index doorbell_index;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 2c35036e4ba25..3204c3a42f2a3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2409,6 +2409,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
++ adev->suspend_complete = false;
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = true;
+ else if (amdgpu_acpi_is_s3_active(adev))
+@@ -2423,6 +2424,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
++ adev->suspend_complete = true;
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+index 468a67b302d4c..ca5c86e5f7cd6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
+ }
+ }
+
+- if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
++ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
+ ret = -EFAULT;
+
+ err_free_shared_buf:
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2e23d08b45f4a..d7d15b618c374 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3033,6 +3033,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
+
+ gfx_v9_0_cp_gfx_enable(adev, true);
+
++	/* For now, limit this quirk to the APU gfx9 series; it is already
++	 * confirmed that the APU gfx10/gfx11 do not need this update.
++ */
++ if (adev->flags & AMD_IS_APU &&
++ adev->in_s3 && !adev->suspend_complete) {
++ DRM_INFO(" Will skip the CSB packet resubmit\n");
++ return 0;
++ }
+ r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+index ae45656eb8779..0a601336cf697 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+@@ -426,6 +426,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
+ u32 inst_mask;
+ int i;
+
++ if (amdgpu_sriov_vf(adev))
++ adev->rmmio_remap.reg_offset =
++ SOC15_REG_OFFSET(
++ NBIO, 0,
++ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
++ << 2;
+ WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
+ 0xff & ~(adev->gfx.xcc_mask));
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 3667f9a548414..2a7c606d1d191 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -1296,10 +1296,32 @@ static int soc15_common_suspend(void *handle)
+ return soc15_common_hw_fini(adev);
+ }
+
++static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
++{
++ u32 sol_reg;
++
++ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
++
++	/* Reset for the following suspend abort cases.
++	 * 1) For now, the reset is limited to the APU side; dGPU has not been checked yet.
++	 * 2) S3 suspend aborted and TOS already launched.
++ */
++ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
++ !adev->suspend_complete &&
++ sol_reg)
++ return true;
++
++ return false;
++}
++
+ static int soc15_common_resume(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ if (soc15_need_reset_on_resume(adev)) {
++ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
++ soc15_asic_reset(adev);
++ }
+ return soc15_common_hw_init(adev);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 3287a39613959..12ee273e87e1e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -1482,10 +1482,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
+
+ /* Cgroup Support */
+ /* Check with device cgroup if @kfd device is accessible */
+-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
++static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
+ {
+ #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+- struct drm_device *ddev = adev_to_drm(kfd->adev);
++ struct drm_device *ddev;
++
++ if (node->xcp)
++ ddev = node->xcp->ddev;
++ else
++ ddev = adev_to_drm(node->adev);
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
+ ddev->render->index,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 83c263e2d7171..50444ab7b3cc0 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1816,21 +1816,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+- goto error;
+- }
+- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
+- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+- goto error;
+- }
+- }
+-
+- /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+- * It is expected that DMUB will resend any pending notifications at this point, for
+- * example HPD from DPIA.
+- */
+- if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
++ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
++ * It is expected that DMUB will resend any pending notifications at this point. Note
++	 * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
++	 * align with the legacy interface initialization sequence. Connection status will be
++	 * proactively detected once in amdgpu_dm_initialize_drm_device.
++ */
+ dc_enable_dmub_outbox(adev->dm.dc);
+
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+@@ -2256,6 +2247,7 @@ static int dm_sw_fini(void *handle)
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
++ kfree(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+@@ -3484,6 +3476,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
++ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
++ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
++ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
++
++ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
++ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
++ }
++
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+@@ -3509,10 +3509,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+-
+- if (adev->dm.hpd_rx_offload_wq)
+- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+- aconnector;
+ }
+ }
+
+@@ -4481,6 +4477,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+
+ link = dc_get_link_at_index(dm->dc, i);
+
++ if (dm->hpd_rx_offload_wq)
++ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
++ aconnector;
++
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index bbf2a465f400b..4c3c4c8de1cfc 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1860,19 +1860,21 @@ static enum bp_result get_firmware_info_v3_2(
+ /* Vega12 */
+ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+ DATA_TABLES(smu_info));
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+ if (!smu_info_v3_2)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
++
+ info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+ } else if (revision.minor == 3) {
+ /* Vega20 */
+ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+ if (!smu_info_v3_3)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
++
+ info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+ }
+
+@@ -2435,10 +2437,11 @@ static enum bp_result get_integrated_info_v11(
+ info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+ DATA_TABLES(integratedsysteminfo));
+
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+ if (info_v11 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
++
+ info->gpu_cap_info =
+ le32_to_cpu(info_v11->gpucapinfo);
+ /*
+@@ -2650,11 +2653,12 @@ static enum bp_result get_integrated_info_v2_1(
+
+ info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
+ DATA_TABLES(integratedsysteminfo));
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
+ if (info_v2_1 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
++
+ info->gpu_cap_info =
+ le32_to_cpu(info_v2_1->gpucapinfo);
+ /*
+@@ -2812,11 +2816,11 @@ static enum bp_result get_integrated_info_v2_2(
+ info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
+ DATA_TABLES(integratedsysteminfo));
+
+- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+-
+ if (info_v2_2 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
++ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
++
+ info->gpu_cap_info =
+ le32_to_cpu(info_v2_2->gpucapinfo);
+ /*
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+index ed94187c2afa2..f365773d57148 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+@@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+ link->dc->link_srv->enable_hpd_filter(link, enable);
+ }
+
+-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
++bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+ {
+ return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 3f33740e2f659..5f2eac868b747 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -2116,11 +2116,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+ *
+ * @dc: pointer to dc struct
+ * @stream: pointer to all possible streams
+- * @num_streams: number of valid DPIA streams
++ * @count: number of valid DPIA streams
+ *
+ * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+ */
+-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
++bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
+ const unsigned int count);
+
+ /* Sink Interfaces - A sink corresponds to a display output device */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index cfaa39c5dd16b..83719f5bea495 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -1433,6 +1433,12 @@ struct dp_trace {
+ #ifndef DP_TUNNELING_STATUS
+ #define DP_TUNNELING_STATUS 0xE0025 /* 1.4a */
+ #endif
++#ifndef DP_TUNNELING_MAX_LINK_RATE
++#define DP_TUNNELING_MAX_LINK_RATE 0xE0028 /* 1.4a */
++#endif
++#ifndef DP_TUNNELING_MAX_LANE_COUNT
++#define DP_TUNNELING_MAX_LANE_COUNT 0xE0029 /* 1.4a */
++#endif
+ #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
+ #define DPTX_BW_ALLOCATION_MODE_CONTROL 0xE0030 /* 1.4a */
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index accffba5a6834..cc173ecf78e0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -1110,21 +1110,25 @@ struct dc_panel_config {
+ } ilr;
+ };
+
++#define MAX_SINKS_PER_LINK 4
++
+ /*
+ * USB4 DPIA BW ALLOCATION STRUCTS
+ */
+ struct dc_dpia_bw_alloc {
+- int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
+- int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
+- int sink_max_bw; // The Max BW that sink can require/support
++ int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
++	int link_verified_bw;   // The verified BW that the link can allocate and use; it has been verified already
++ int link_max_bw; // The Max BW that link can require/support
++ int allocated_bw; // The Actual Allocated BW for this DPIA
+ int estimated_bw; // The estimated available BW for this DPIA
+ int bw_granularity; // BW Granularity
++ int dp_overhead; // DP overhead in dp tunneling
+ bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
+ bool response_ready; // Response ready from the CM side
++ uint8_t nrd_max_lane_count; // Non-reduced max lane count
++ uint8_t nrd_max_link_rate; // Non-reduced max link rate
+ };
+
+-#define MAX_SINKS_PER_LINK 4
+-
+ enum dc_hpd_enable_select {
+ HPD_EN_FOR_ALL_EDP = 0,
+ HPD_EN_FOR_PRIMARY_EDP_ONLY,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index b9768cd9b8a07..4901e27f678bc 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -2071,17 +2071,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
+ }
+ }
+
+- /*
+- * If the link is DP-over-USB4 do the following:
+- * - Train with fallback when enabling DPIA link. Conventional links are
++ /* Train with fallback when enabling DPIA link. Conventional links are
+ * trained with fallback during sink detection.
+- * - Allocate only what the stream needs for bw in Gbps. Inform the CM
+- * in case stream needs more or less bw from what has been allocated
+- * earlier at plug time.
+ */
+- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
++ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ do_fallback = true;
+- }
+
+ /*
+ * Temporary w/a to get DP2.0 link rates to work with SST.
+@@ -2263,6 +2257,32 @@ static enum dc_status enable_link(
+ return status;
+ }
+
++static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
++{
++ return true;
++}
++
++static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
++{
++ bool ret;
++
++ int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
++ dc_link_get_highest_encoding_format(stream->sink->link));
++
++ ret = allocate_usb4_bandwidth_for_stream(stream, bw);
++
++ return ret;
++}
++
++static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
++{
++ bool ret;
++
++ ret = allocate_usb4_bandwidth_for_stream(stream, 0);
++
++ return ret;
++}
++
+ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ {
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+@@ -2299,6 +2319,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ update_psp_stream_config(pipe_ctx, true);
+ dc->hwss.blank_stream(pipe_ctx);
+
++ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++ deallocate_usb4_bandwidth(pipe_ctx->stream);
++
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+@@ -2520,6 +2543,9 @@ void link_set_dpms_on(
+ }
+ }
+
++ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++ allocate_usb4_bandwidth(pipe_ctx->stream);
++
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+index b45fda96eaf64..5b0bc7f6a188c 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
+ return DC_OK;
+ }
+
++/*
++ * This function calculates the bandwidth required for the stream timing
++ * and aggregates the stream bandwidth for the respective dpia link
++ *
++ * @stream: pointer to the dc_stream_state struct instance
++ * @num_streams: number of streams to be validated
++ *
++ * return: true if validation succeeds
++ */
+ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+ {
+- bool ret = true;
+- int bw_needed[MAX_DPIA_NUM];
+- struct dc_link *link[MAX_DPIA_NUM];
++ int bw_needed[MAX_DPIA_NUM] = {0};
++ struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
++ int num_dpias = 0;
++
++ for (unsigned int i = 0; i < num_streams; ++i) {
++ if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
++ /* new dpia sst stream, check whether it exceeds max dpia */
++ if (num_dpias >= MAX_DPIA_NUM)
++ return false;
+
+- if (!num_streams || num_streams > MAX_DPIA_NUM)
+- return ret;
++ dpia_link[num_dpias] = stream[i].link;
++ bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
++ dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
++ num_dpias++;
++ } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++ uint8_t j = 0;
++ /* check whether its a known dpia link */
++ for (; j < num_dpias; ++j) {
++ if (dpia_link[j] == stream[i].link)
++ break;
++ }
++
++ if (j == num_dpias) {
++ /* new dpia mst stream, check whether it exceeds max dpia */
++ if (num_dpias >= MAX_DPIA_NUM)
++ return false;
++ else {
++ dpia_link[j] = stream[i].link;
++ num_dpias++;
++ }
++ }
++
++ bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
++ dc_link_get_highest_encoding_format(dpia_link[j]));
++ }
++ }
+
+- for (uint8_t i = 0; i < num_streams; ++i) {
++ /* Include dp overheads */
++ for (uint8_t i = 0; i < num_dpias; ++i) {
++ int dp_overhead = 0;
+
+- link[i] = stream[i].link;
+- bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+- dc_link_get_highest_encoding_format(link[i]));
++ dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
++ bw_needed[i] += dp_overhead;
+ }
+
+- ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+-
+- return ret;
++ return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+index d6e1f969bfd52..5491b707cec88 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+@@ -54,11 +54,18 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+ static void reset_bw_alloc_struct(struct dc_link *link)
+ {
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+- link->dpia_bw_alloc_config.sink_verified_bw = 0;
+- link->dpia_bw_alloc_config.sink_max_bw = 0;
++ link->dpia_bw_alloc_config.link_verified_bw = 0;
++ link->dpia_bw_alloc_config.link_max_bw = 0;
++ link->dpia_bw_alloc_config.allocated_bw = 0;
+ link->dpia_bw_alloc_config.estimated_bw = 0;
+ link->dpia_bw_alloc_config.bw_granularity = 0;
++ link->dpia_bw_alloc_config.dp_overhead = 0;
+ link->dpia_bw_alloc_config.response_ready = false;
++ link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
++ link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
++ for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
++ link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
++ DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
+ }
+
+ #define BW_GRANULARITY_0 4 // 0.25 Gbps
+@@ -104,6 +111,32 @@ static int get_estimated_bw(struct dc_link *link)
+ return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ }
+
++static int get_non_reduced_max_link_rate(struct dc_link *link)
++{
++ uint8_t nrd_max_link_rate = 0;
++
++ core_link_read_dpcd(
++ link,
++ DP_TUNNELING_MAX_LINK_RATE,
++ &nrd_max_link_rate,
++ sizeof(uint8_t));
++
++ return nrd_max_link_rate;
++}
++
++static int get_non_reduced_max_lane_count(struct dc_link *link)
++{
++ uint8_t nrd_max_lane_count = 0;
++
++ core_link_read_dpcd(
++ link,
++ DP_TUNNELING_MAX_LANE_COUNT,
++ &nrd_max_lane_count,
++ sizeof(uint8_t));
++
++ return nrd_max_lane_count;
++}
++
+ /*
+ * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
+ * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
+@@ -111,13 +144,20 @@ static int get_estimated_bw(struct dc_link *link)
+ */
+ static void init_usb4_bw_struct(struct dc_link *link)
+ {
+- // Init the known values
++ reset_bw_alloc_struct(link);
++
++ /* init the known values */
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
++ link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
++ link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
+
+ DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
+ __func__, link->dpia_bw_alloc_config.bw_granularity,
+ link->dpia_bw_alloc_config.estimated_bw);
++ DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
++ __func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
++ link->dpia_bw_alloc_config.nrd_max_lane_count);
+ }
+
+ static uint8_t get_lowest_dpia_index(struct dc_link *link)
+@@ -142,39 +182,50 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
+ }
+
+ /*
+- * Get the Max Available BW or Max Estimated BW for each Host Router
++ * Get the maximum dp tunnel bandwidth of the host router
+ *
+- * @link: pointer to the dc_link struct instance
+- * @type: ESTIMATD BW or MAX AVAILABLE BW
++ * @dc: pointer to the dc struct instance
++ * @hr_index: host router index
+ *
+- * return: response_ready flag from dc_link struct
++ * return: host router maximum dp tunnel bandwidth
+ */
+-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
++static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
+ {
+- const struct dc *dc_struct = link->dc;
+- uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+- uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+- struct dc_link *link_temp;
++ uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
++ uint8_t hr_index_temp = 0;
++ struct dc_link *link_dpia_primary, *link_dpia_secondary;
+ int total_bw = 0;
+- int i;
+
+- for (i = 0; i < MAX_PIPES * 2; ++i) {
++ for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
+
+- if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
++ if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+- link_temp = dc_struct->links[i];
+- if (!link_temp || !link_temp->hpd_status)
+- continue;
+-
+- idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+-
+- if (idx_temp == idx) {
+-
+- if (type == HOST_ROUTER_BW_ESTIMATED)
+- total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+- else if (type == HOST_ROUTER_BW_ALLOCATED)
+- total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
++ hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
++
++ if (hr_index_temp == hr_index) {
++ link_dpia_primary = dc->links[i];
++ link_dpia_secondary = dc->links[i + 1];
++
++ /**
++			 * If BW allocation is enabled on both DPIAs, then
++ * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
++ * otherwise HR BW = Estimated(bw alloc enabled dpia)
++ */
++ if ((link_dpia_primary->hpd_status &&
++ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
++ (link_dpia_secondary->hpd_status &&
++ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
++ total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
++ link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
++ } else if (link_dpia_primary->hpd_status &&
++ link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
++ total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
++ } else if (link_dpia_secondary->hpd_status &&
++ link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
++ total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
++ }
++ break;
+ }
+ }
+
+@@ -194,7 +245,6 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
+ if (link) {
+ DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
+ __func__, link->link_index);
+- link->dpia_bw_alloc_config.sink_allocated_bw = 0;
+ reset_bw_alloc_struct(link);
+ }
+ }
+@@ -220,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+
+ /* Error check whether requested and allocated are equal */
+ req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+- if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
++ if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+ DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
+ __func__, link->link_index);
+ }
+@@ -343,9 +393,9 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+ DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
+ __func__, link->link_index);
+ DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
+- __func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
++ __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
+
+- link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
++ link->dpia_bw_alloc_config.allocated_bw = bw_needed;
+
+ link->dpia_bw_alloc_config.response_ready = true;
+ break;
+@@ -383,8 +433,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+ if (link->hpd_status && peak_bw > 0) {
+
+ // If DP over USB4 then we need to check BW allocation
+- link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
+- set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
++ link->dpia_bw_alloc_config.link_max_bw = peak_bw;
++ set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
+
+ do {
+ if (timeout > 0)
+@@ -396,8 +446,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+
+ if (!timeout)
+ ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+- else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+- ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
++ else if (link->dpia_bw_alloc_config.allocated_bw > 0)
++ ret = link->dpia_bw_alloc_config.allocated_bw;
+ }
+ //2. Cold Unplug
+ else if (!link->hpd_status)
+@@ -406,7 +456,6 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+ out:
+ return ret;
+ }
+-
+ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+ {
+ bool ret = false;
+@@ -414,7 +463,7 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
+
+ DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+ __func__, link->link_index, link->hpd_status,
+- link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
++ link->dpia_bw_alloc_config.allocated_bw, req_bw);
+
+ if (!get_bw_alloc_proceed_flag(link))
+ goto out;
+@@ -439,31 +488,70 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
+ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+ {
+ bool ret = true;
+- int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
+- uint8_t lowest_dpia_index = 0, dpia_index = 0;
+- uint8_t i;
++ int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
++ uint8_t lowest_dpia_index, i, hr_index;
+
+ if (!num_dpias || num_dpias > MAX_DPIA_NUM)
+ return ret;
+
+- //Get total Host Router BW & Validate against each Host Router max BW
++ lowest_dpia_index = get_lowest_dpia_index(link[0]);
++
++ /* get total Host Router BW with granularity for the given modes */
+ for (i = 0; i < num_dpias; ++i) {
++ int granularity_Gbps = 0;
++ int bw_granularity = 0;
+
+ if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
+ continue;
+
+- lowest_dpia_index = get_lowest_dpia_index(link[i]);
+ if (link[i]->link_index < lowest_dpia_index)
+ continue;
+
+- dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
+- bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
+- if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
++ granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
++ bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
++ ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
+
+- ret = false;
+- break;
++ hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
++ bw_needed_per_hr[hr_index] += bw_granularity;
++ }
++
++ /* validate against each Host Router max BW */
++ for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
++ if (bw_needed_per_hr[hr_index]) {
++ host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
++ if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
++ ret = false;
++ break;
++ }
+ }
+ }
+
+ return ret;
+ }
++
++int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
++{
++ int dp_overhead = 0, link_mst_overhead = 0;
++
++ if (!get_bw_alloc_proceed_flag((link)))
++ return dp_overhead;
++
++ /* if its mst link, add MTPH overhead */
++ if ((link->type == dc_connection_mst_branch) &&
++ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
++ /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
++ * MST overhead is 1/64 of link bandwidth (excluding any overhead)
++ */
++ const struct dc_link_settings *link_cap =
++ dc_link_get_link_cap(link);
++ uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
++ (uint32_t)link_cap->lane_count *
++ LINK_RATE_REF_FREQ_IN_KHZ * 8;
++ link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
++ }
++
++ /* add all the overheads */
++ dp_overhead = link_mst_overhead;
++
++ return dp_overhead;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+index 981bc4eb6120e..3b6d8494f9d5d 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+ */
+ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
+
++/*
++ * Obtain all the DP overheads in dp tunneling for the dpia link
++ *
++ * @link: pointer to the dc_link struct instance
++ *
++ * return: DP overheads in DP tunneling
++ */
++int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
++
+ #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index 01da6789d0440..5860428da8de8 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1034,7 +1034,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ uint64_t *points;
+ uint32_t signaled_count, i;
+
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ lockdep_assert_none_held_once();
+
+ points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+@@ -1103,7 +1104,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ * fallthough and try a 0 timeout wait!
+ */
+
+- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
++ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
++ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
+ for (i = 0; i < count; ++i)
+ drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
+ }
+@@ -1378,10 +1380,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+
+ /* This happens inside the syncobj lock */
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
++ if (!fence)
++ return;
++
+ ret = dma_fence_chain_find_seqno(&fence, entry->point);
+- if (ret != 0 || !fence) {
++ if (ret != 0) {
++ /* The given seqno has not been submitted yet. */
+ dma_fence_put(fence);
+ return;
++ } else if (!fence) {
++ /* If dma_fence_chain_find_seqno returns 0 but sets the fence
++ * to NULL, it implies that the given seqno is signaled and a
++ * later seqno has already been submitted. Assign a stub fence
++ * so that the eventfd still gets signaled below.
++ */
++ fence = dma_fence_get_stub();
+ }
+
+ list_del_init(&entry->node);
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 0ce935efe5dfd..18ae41d5f4f98 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -1212,7 +1212,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_tv_format format;
+ u32 format_map;
+
+- format_map = 1 << conn_state->tv.mode;
++ format_map = 1 << conn_state->tv.legacy_mode;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+@@ -2295,7 +2295,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
+ * Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+- format_map = 1 << conn_state->tv.mode;
++ format_map = 1 << conn_state->tv.legacy_mode;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+@@ -2360,7 +2360,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
+ int i;
+
+ for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+- if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
++ if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
+ *val = i;
+
+ return 0;
+@@ -2416,7 +2416,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
+ struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
+
+ if (property == intel_sdvo_connector->tv_format) {
+- state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
++ state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
+
+ if (state->crtc) {
+ struct drm_crtc_state *crtc_state =
+@@ -3071,7 +3071,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ drm_property_add_enum(intel_sdvo_connector->tv_format, i,
+ tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+- intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
++ intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
+ drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+ intel_sdvo_connector->tv_format, 0);
+ return true;
+diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
+index d84a79491da23..042ed966807ed 100644
+--- a/drivers/gpu/drm/i915/display/intel_tv.c
++++ b/drivers/gpu/drm/i915/display/intel_tv.c
+@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
+
+ static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
+ {
+- int format = conn_state->tv.mode;
++ int format = conn_state->tv.legacy_mode;
+
+ return &tv_modes[format];
+ }
+@@ -1710,7 +1710,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
+ break;
+ }
+
+- connector->state->tv.mode = i;
++ connector->state->tv.legacy_mode = i;
+ }
+
+ static int
+@@ -1865,7 +1865,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
+ old_state = drm_atomic_get_old_connector_state(state, connector);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
+
+- if (old_state->tv.mode != new_state->tv.mode ||
++ if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
+ old_state->tv.margins.left != new_state->tv.margins.left ||
+ old_state->tv.margins.right != new_state->tv.margins.right ||
+ old_state->tv.margins.top != new_state->tv.margins.top ||
+@@ -1902,7 +1902,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
+ conn_state->tv.margins.right = 46;
+ conn_state->tv.margins.bottom = 37;
+
+- conn_state->tv.mode = 0;
++ conn_state->tv.legacy_mode = 0;
+
+ /* Create TV properties then attach current values */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+@@ -1916,7 +1916,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
+
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.legacy_tv_mode_property,
+- conn_state->tv.mode);
++ conn_state->tv.legacy_mode);
+ drm_object_attach_property(&connector->base,
+ i915->drm.mode_config.tv_left_margin_property,
+ conn_state->tv.margins.left);
+diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+index 3f73b211fa8e3..3407450435e20 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
+ if (priv->encoders[MESON_ENC_CVBS]) {
+ meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
+ drm_bridge_remove(&meson_encoder_cvbs->bridge);
+- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
+ }
+ }
+diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
+index 3f93c70488cad..311b91630fbe5 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
+@@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
+ if (priv->encoders[MESON_ENC_DSI]) {
+ meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
+ drm_bridge_remove(&meson_encoder_dsi->bridge);
+- drm_bridge_remove(meson_encoder_dsi->next_bridge);
+ }
+ }
+diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+index 25ea765586908..c4686568c9ca5 100644
+--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
++++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+@@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
+ if (priv->encoders[MESON_ENC_HDMI]) {
+ meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
+ drm_bridge_remove(&meson_encoder_hdmi->bridge);
+- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
+ }
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+index 19188683c8fca..8c2bf1c16f2a9 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
+ return (void *)fw;
+ }
+
++static void
++shadow_fw_release(void *fw)
++{
++ release_firmware(fw);
++}
++
+ static const struct nvbios_source
+ shadow_fw = {
+ .name = "firmware",
+ .init = shadow_fw_init,
+- .fini = (void(*)(void *))release_firmware,
++ .fini = shadow_fw_release,
+ .read = shadow_fw_read,
+ .rw = false,
+ };
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index cddb9151d20f4..9b60222511d65 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -384,7 +384,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+ enum ttm_caching caching,
+ pgoff_t start_page, pgoff_t end_page)
+ {
+- struct page **pages = tt->pages;
++ struct page **pages = &tt->pages[start_page];
+ unsigned int order;
+ pgoff_t i, nr;
+
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 7bf12ca0eb4a9..4519ee377aa76 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4650,6 +4650,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
+ { /* Logitech G Pro X Superlight Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
++ { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
+
+ { /* G935 Gaming Headset */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
+diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
+index c463e54decbce..edd0b0f1193bd 100644
+--- a/drivers/hid/hid-nvidia-shield.c
++++ b/drivers/hid/hid-nvidia-shield.c
+@@ -800,6 +800,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
+
+ led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
+ "thunderstrike%d:blue:led", ts->id);
++ if (!led->name)
++ return -ENOMEM;
+ led->max_brightness = 1;
+ led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
+ led->brightness_get = &thunderstrike_led_get_brightness;
+@@ -831,6 +833,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
+ shield_dev->battery_dev.desc.name =
+ devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
+ "thunderstrike_%d", ts->id);
++ if (!shield_dev->battery_dev.desc.name)
++ return -ENOMEM;
+
+ shield_dev->battery_dev.psy = power_supply_register(
+ &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 95f4c0b00b2d8..b8fc8d1ef20df 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
+ #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
++#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
+ #define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index 92a49fafe2c02..f3bf2e4701c38 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
+ const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
+ int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
++ int num_reg_temp_config;
+ struct device *hwmon_dev;
+ struct sensor_template_group tsi_temp_tg;
+
+@@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+@@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+@@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6775_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6775_REG_TEMP_HYST;
+ reg_temp_config = NCT6775_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6775_REG_TEMP_CRIT;
+
+@@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6775_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6775_REG_TEMP_HYST;
+ reg_temp_config = NCT6776_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6776_REG_TEMP_CRIT;
+
+@@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6779_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6779_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6779_REG_TEMP_CRIT;
+
+@@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6779_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6779_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6779_REG_TEMP_CRIT;
+
+@@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ reg_temp_over = NCT6798_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6798_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
++ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
+ reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6798_REG_TEMP_CRIT;
+
+@@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ = reg_temp_crit[src - 1];
+ if (reg_temp_crit_l && reg_temp_crit_l[i])
+ data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
+- data->reg_temp_config[src - 1] = reg_temp_config[i];
++ if (i < num_reg_temp_config)
++ data->reg_temp_config[src - 1] = reg_temp_config[i];
+ data->temp_src[src - 1] = src;
+ continue;
+ }
+@@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
+ data->reg_temp[0][s] = reg_temp[i];
+ data->reg_temp[1][s] = reg_temp_over[i];
+ data->reg_temp[2][s] = reg_temp_hyst[i];
+- data->reg_temp_config[s] = reg_temp_config[i];
++ if (i < num_reg_temp_config)
++ data->reg_temp_config[s] = reg_temp_config[i];
+ if (reg_temp_crit_h && reg_temp_crit_h[i])
+ data->reg_temp[3][s] = reg_temp_crit_h[i];
+ else if (reg_temp_crit[src - 1])
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 1775a79aeba2a..0951bfdc89cfa 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
+ ctl &= ~I2CR_MTX;
+ imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
++
++ /* flag the last byte as processed */
++ i2c_imx_slave_event(i2c_imx,
++ I2C_SLAVE_READ_PROCESSED, &value);
++
+ i2c_imx_slave_finish_op(i2c_imx);
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index faa88d12ee868..cc466dfd792b0 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1809,7 +1809,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+- break;
++ return -EINVAL;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+@@ -1824,13 +1824,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to Build and send response back to udata */
+- break;
++ return 0;
+ default:
+ ibdev_err(&rdev->ibdev,
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+- return 0;
+ }
+
+ int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index abbabea7f5fa3..2a62239187622 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -748,7 +748,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+- srq->threshold = le16_to_cpu(sb->srq_limit);
++ if (!rc)
++ srq->threshold = le16_to_cpu(sb->srq_limit);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index dfea53e0fdeb8..5eb309ead7076 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
+ "Unable to allocate credit return DMA range for NUMA %d\n",
+ i);
+ ret = -ENOMEM;
+- goto done;
++ goto free_cr_base;
+ }
+ }
+ set_dev_node(&dd->pcidev->dev, dd->node);
+@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
+ ret = 0;
+ done:
+ return ret;
++
++free_cr_base:
++ free_credit_return(dd);
++ goto done;
+ }
+
+ void free_credit_return(struct hfi1_devdata *dd)
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 26c62162759ba..969c5c3ab859e 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ {
+ int rval = 0;
+
+- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
++ if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ rval = _extend_sdma_tx_descs(dd, tx);
+ if (rval) {
+ __sdma_txclean(dd, tx);
+diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
+index d06e45d2c23fd..9052e8932dc18 100644
+--- a/drivers/infiniband/hw/irdma/defs.h
++++ b/drivers/infiniband/hw/irdma/defs.h
+@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
+ #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+ #define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+ #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
++#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
+ #define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+ #define IRDMA_AE_RESET_SENT 0x0601
+ #define IRDMA_AE_TERMINATE_SENT 0x0602
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 564c9188e1f84..1745f40b075fd 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
++ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ default:
+@@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
+ free_irq(msix_vec->irq, dev_id);
++ if (rf == dev_id) {
++ tasklet_kill(&rf->dpc_tasklet);
++ } else {
++ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
++
++ tasklet_kill(&iwceq->dpc_tasklet);
++ }
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 2f1bedd3a5201..60618b2046b97 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+
+ if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
+ init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
++ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
++ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
++ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+@@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
+ info.cq_base_pa = iwcq->kmem.pa;
+ }
+
+- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+- (u32)IRDMA_MAX_CQ_READ_THRESH);
++ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
++ (u32)IRDMA_MAX_CQ_READ_THRESH);
+
+ if (irdma_sc_cq_init(cq, &info)) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
+diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
+index f87531318feb8..a78a067e3ce7f 100644
+--- a/drivers/infiniband/hw/mlx5/cong.c
++++ b/drivers/infiniband/hw/mlx5/cong.c
+@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
+ dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
+
+ for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
++ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
++ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
++ if (!MLX5_CAP_GEN(mdev, roce) ||
++ !MLX5_CAP_ROCE(mdev, roce_cc_general))
++ continue;
++
+ dbg_cc_params->params[i].offset = i;
+ dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 7887a6786ed43..f118ce0a9a617 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
+ /* RQ - read access only (0) */
+ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
+ ureq.rq_len, true, 0, alloc_and_init);
+- if (rc)
++ if (rc) {
++ ib_umem_release(qp->usq.umem);
++ qp->usq.umem = NULL;
++ if (rdma_protocol_roce(&dev->ibdev, 1)) {
++ qedr_free_pbl(dev, &qp->usq.pbl_info,
++ qp->usq.pbl_tbl);
++ } else {
++ kfree(qp->usq.pbl_tbl);
++ }
+ return rc;
++ }
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index c12005eab14c1..015bfeede90e1 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
+ MODULE_PARM_DESC(srpt_srq_size,
+ "Shared receive queue (SRQ) size.");
+
++static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
++{
++ return kstrtou64(buffer, 16, (u64 *)kp->arg);
++}
+ static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
+ {
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
+ }
+-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+- 0444);
++module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
++ &srpt_service_guid, 0444);
+ MODULE_PARM_DESC(srpt_service_guid,
+ "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
+
+@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
+ /**
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+- * @ch: SRPT RDMA channel.
++ * @ptr: SRPT RDMA channel.
+ */
+-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
++static void srpt_qp_event(struct ib_event *event, void *ptr)
+ {
++ struct srpt_rdma_ch *ch = ptr;
++
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+@@ -1807,8 +1813,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ ch->cq_size = ch->rq_size + sq_size;
+
+ qp_init->qp_context = (void *)ch;
+- qp_init->event_handler
+- = (void(*)(struct ib_event *, void*))srpt_qp_event;
++ qp_init->event_handler = srpt_qp_event;
+ qp_init->send_cq = ch->cq;
+ qp_init->recv_cq = ch->cq;
+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index e2c1848182de9..d0bb3edfd0a09 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -294,6 +294,7 @@ static const struct xpad_device {
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++ { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
+ { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+@@ -491,6 +492,7 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x15e4), /* Numark Xbox 360 controllers */
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech Xbox 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
++ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
+ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
+ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index cd45a65e17f2c..dfc6c581873b7 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -634,6 +634,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
++ {
++ /* Fujitsu Lifebook U728 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
++ },
+ {
+ /* Gigabyte M912 */
+ .matches = {
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index af32fbe57b630..b068ff8afbc9a 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+ }
+ }
+
+- if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
++ /* Some devices with gpio_int_idx 0 list a third unused GPIO */
++ if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ gpio_mapping = acpi_goodix_int_first_gpios;
+ } else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 3632c92cd183c..676c9250d3f28 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3181,6 +3181,7 @@ static void its_cpu_init_lpis(void)
+ val |= GICR_CTLR_ENABLE_LPIS;
+ writel_relaxed(val, rbase + GICR_CTLR);
+
++out:
+ if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+@@ -3216,7 +3217,6 @@ static void its_cpu_init_lpis(void)
+
+ /* Make sure the GIC has seen the above */
+ dsb(sy);
+-out:
+ gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
+ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
+index 5101a3fb11df5..58881d3139792 100644
+--- a/drivers/irqchip/irq-mbigen.c
++++ b/drivers/irqchip/irq-mbigen.c
+@@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
+ static int mbigen_of_create_domain(struct platform_device *pdev,
+ struct mbigen_device *mgn_chip)
+ {
+- struct device *parent;
+ struct platform_device *child;
+ struct irq_domain *domain;
+ struct device_node *np;
+ u32 num_pins;
+ int ret = 0;
+
+- parent = bus_get_dev_root(&platform_bus_type);
+- if (!parent)
+- return -ENODEV;
+-
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ continue;
+
+- child = of_platform_device_create(np, NULL, parent);
++ child = of_platform_device_create(np, NULL, NULL);
+ if (!child) {
+ ret = -ENOMEM;
+ break;
+@@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
+ }
+ }
+
+- put_device(parent);
+ if (ret)
+ of_node_put(np);
+
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index 5b7bc4fd9517c..bf0b40b0fad4b 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -148,7 +148,13 @@ static void plic_irq_eoi(struct irq_data *d)
+ {
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ if (unlikely(irqd_irq_disabled(d))) {
++ plic_toggle(handler, d->hwirq, 1);
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ plic_toggle(handler, d->hwirq, 0);
++ } else {
++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
++ }
+ }
+
+ #ifdef CONFIG_SMP
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 1a539ec81bacc..5a296db23feb3 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -62,6 +62,8 @@ struct convert_context {
+ struct skcipher_request *req;
+ struct aead_request *req_aead;
+ } r;
++ bool aead_recheck;
++ bool aead_failed;
+
+ };
+
+@@ -82,6 +84,8 @@ struct dm_crypt_io {
+ blk_status_t error;
+ sector_t sector;
+
++ struct bvec_iter saved_bi_iter;
++
+ struct rb_node rb_node;
+ } CRYPTO_MINALIGN_ATTR;
+
+@@ -1376,10 +1380,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
+ if (r == -EBADMSG) {
+ sector_t s = le64_to_cpu(*sector);
+
+- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+- ctx->bio_in->bi_bdev, s);
+- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+- ctx->bio_in, s, 0);
++ ctx->aead_failed = true;
++ if (ctx->aead_recheck) {
++ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
++ ctx->bio_in->bi_bdev, s);
++ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
++ ctx->bio_in, s, 0);
++ }
+ }
+
+ if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+@@ -1763,6 +1770,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ io->base_bio = bio;
+ io->sector = sector;
+ io->error = 0;
++ io->ctx.aead_recheck = false;
++ io->ctx.aead_failed = false;
+ io->ctx.r.req = NULL;
+ io->integrity_metadata = NULL;
+ io->integrity_metadata_from_pool = false;
+@@ -1774,6 +1783,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
+ atomic_inc(&io->io_pending);
+ }
+
++static void kcryptd_queue_read(struct dm_crypt_io *io);
++
+ /*
+ * One of the bios was finished. Check for completion of
+ * the whole request and correctly clean up the buffer.
+@@ -1787,6 +1798,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ if (!atomic_dec_and_test(&io->io_pending))
+ return;
+
++ if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
++ cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
++ io->ctx.aead_recheck = true;
++ io->ctx.aead_failed = false;
++ io->error = 0;
++ kcryptd_queue_read(io);
++ return;
++ }
++
+ if (io->ctx.r.req)
+ crypt_free_req(cc, io->ctx.r.req, base_bio);
+
+@@ -1822,15 +1842,19 @@ static void crypt_endio(struct bio *clone)
+ struct dm_crypt_io *io = clone->bi_private;
+ struct crypt_config *cc = io->cc;
+ unsigned int rw = bio_data_dir(clone);
+- blk_status_t error;
++ blk_status_t error = clone->bi_status;
++
++ if (io->ctx.aead_recheck && !error) {
++ kcryptd_queue_crypt(io);
++ return;
++ }
+
+ /*
+ * free the processed pages
+ */
+- if (rw == WRITE)
++ if (rw == WRITE || io->ctx.aead_recheck)
+ crypt_free_buffer_pages(cc, clone);
+
+- error = clone->bi_status;
+ bio_put(clone);
+
+ if (rw == READ && !error) {
+@@ -1851,6 +1875,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+ struct crypt_config *cc = io->cc;
+ struct bio *clone;
+
++ if (io->ctx.aead_recheck) {
++ if (!(gfp & __GFP_DIRECT_RECLAIM))
++ return 1;
++ crypt_inc_pending(io);
++ clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
++ if (unlikely(!clone)) {
++ crypt_dec_pending(io);
++ return 1;
++ }
++ clone->bi_iter.bi_sector = cc->start + io->sector;
++ crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
++ io->saved_bi_iter = clone->bi_iter;
++ dm_submit_bio_remap(io->base_bio, clone);
++ return 0;
++ }
++
+ /*
+ * We need the original biovec array in order to decrypt the whole bio
+ * data *afterwards* -- thanks to immutable biovecs we don't need to
+@@ -2077,6 +2117,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ io->ctx.bio_out = clone;
+ io->ctx.iter_out = clone->bi_iter;
+
++ if (crypt_integrity_aead(cc)) {
++ bio_copy_data(clone, io->base_bio);
++ io->ctx.bio_in = clone;
++ io->ctx.iter_in = clone->bi_iter;
++ }
++
+ sector += bio_sectors(clone);
+
+ crypt_inc_pending(io);
+@@ -2113,6 +2159,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+
+ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
+ {
++ if (io->ctx.aead_recheck) {
++ if (!io->error) {
++ io->ctx.bio_in->bi_iter = io->saved_bi_iter;
++ bio_copy_data(io->base_bio, io->ctx.bio_in);
++ }
++ crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
++ bio_put(io->ctx.bio_in);
++ }
+ crypt_dec_pending(io);
+ }
+
+@@ -2142,11 +2196,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
+
+ crypt_inc_pending(io);
+
+- crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+- io->sector);
++ if (io->ctx.aead_recheck) {
++ io->ctx.cc_sector = io->sector + cc->iv_offset;
++ r = crypt_convert(cc, &io->ctx,
++ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++ } else {
++ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
++ io->sector);
+
+- r = crypt_convert(cc, &io->ctx,
+- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++ r = crypt_convert(cc, &io->ctx,
++ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
++ }
+ /*
+ * Crypto API backlogged the request, because its queue was full
+ * and we're in softirq context, so continue from a workqueue
+@@ -2188,10 +2248,13 @@ static void kcryptd_async_done(void *data, int error)
+ if (error == -EBADMSG) {
+ sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
+
+- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+- ctx->bio_in->bi_bdev, s);
+- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+- ctx->bio_in, s, 0);
++ ctx->aead_failed = true;
++ if (ctx->aead_recheck) {
++ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
++ ctx->bio_in->bi_bdev, s);
++ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
++ ctx->bio_in, s, 0);
++ }
+ io->error = BLK_STS_PROTECTION;
+ } else if (error < 0)
+ io->error = BLK_STS_IOERR;
+@@ -3117,7 +3180,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
+ sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
+ if (!strcasecmp(sval, "aead")) {
+ set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
+- } else if (strcasecmp(sval, "none")) {
++ } else if (strcasecmp(sval, "none")) {
+ ti->error = "Unknown integrity profile";
+ return -EINVAL;
+ }
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 9261bbebd662a..68923c36b6d4c 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -278,6 +278,8 @@ struct dm_integrity_c {
+
+ atomic64_t number_of_mismatches;
+
++ mempool_t recheck_pool;
++
+ struct notifier_block reboot_notifier;
+ };
+
+@@ -1699,6 +1701,77 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
+ get_random_bytes(result, ic->tag_size);
+ }
+
++static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
++{
++ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
++ struct dm_integrity_c *ic = dio->ic;
++ struct bvec_iter iter;
++ struct bio_vec bv;
++ sector_t sector, logical_sector, area, offset;
++ struct page *page;
++ void *buffer;
++
++ get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
++ dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
++ &dio->metadata_offset);
++ sector = get_data_sector(ic, area, offset);
++ logical_sector = dio->range.logical_sector;
++
++ page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
++ buffer = page_to_virt(page);
++
++ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
++ unsigned pos = 0;
++
++ do {
++ char *mem;
++ int r;
++ struct dm_io_request io_req;
++ struct dm_io_region io_loc;
++ io_req.bi_opf = REQ_OP_READ;
++ io_req.mem.type = DM_IO_KMEM;
++ io_req.mem.ptr.addr = buffer;
++ io_req.notify.fn = NULL;
++ io_req.client = ic->io;
++ io_loc.bdev = ic->dev->bdev;
++ io_loc.sector = sector;
++ io_loc.count = ic->sectors_per_block;
++
++ r = dm_io(&io_req, 1, &io_loc, NULL);
++ if (unlikely(r)) {
++ dio->bi_status = errno_to_blk_status(r);
++ goto free_ret;
++ }
++
++ integrity_sector_checksum(ic, logical_sector, buffer, checksum);
++ r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
++ &dio->metadata_offset, ic->tag_size, TAG_CMP);
++ if (r) {
++ if (r > 0) {
++ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
++ bio->bi_bdev, logical_sector);
++ atomic64_inc(&ic->number_of_mismatches);
++ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
++ bio, logical_sector, 0);
++ r = -EILSEQ;
++ }
++ dio->bi_status = errno_to_blk_status(r);
++ goto free_ret;
++ }
++
++ mem = bvec_kmap_local(&bv);
++ memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
++ kunmap_local(mem);
++
++ pos += ic->sectors_per_block << SECTOR_SHIFT;
++ sector += ic->sectors_per_block;
++ logical_sector += ic->sectors_per_block;
++ } while (pos < bv.bv_len);
++ }
++free_ret:
++ mempool_free(page, &ic->recheck_pool);
++}
++
+ static void integrity_metadata(struct work_struct *w)
+ {
+ struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+@@ -1786,15 +1859,8 @@ static void integrity_metadata(struct work_struct *w)
+ checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
+ if (unlikely(r)) {
+ if (r > 0) {
+- sector_t s;
+-
+- s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
+- DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+- bio->bi_bdev, s);
+- r = -EILSEQ;
+- atomic64_inc(&ic->number_of_mismatches);
+- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+- bio, s, 0);
++ integrity_recheck(dio, checksums);
++ goto skip_io;
+ }
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
+@@ -4271,6 +4337,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
+ goto bad;
+ }
+
++ r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
++ if (r) {
++ ti->error = "Cannot allocate mempool";
++ goto bad;
++ }
++
+ ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
+ WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
+ if (!ic->metadata_wq) {
+@@ -4619,6 +4691,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ kvfree(ic->bbs);
+ if (ic->bufio)
+ dm_bufio_client_destroy(ic->bufio);
++ mempool_exit(&ic->recheck_pool);
+ mempool_exit(&ic->journal_io_mempool);
+ if (ic->io)
+ dm_io_client_destroy(ic->io);
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 82662f5769c4a..7b620b187da90 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -482,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ return 0;
+ }
+
++static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
++ u8 *data, size_t len)
++{
++ memcpy(data, io->recheck_buffer, len);
++ io->recheck_buffer += len;
++
++ return 0;
++}
++
++static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
++ struct bvec_iter start, sector_t cur_block)
++{
++ struct page *page;
++ void *buffer;
++ int r;
++ struct dm_io_request io_req;
++ struct dm_io_region io_loc;
++
++ page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
++ buffer = page_to_virt(page);
++
++ io_req.bi_opf = REQ_OP_READ;
++ io_req.mem.type = DM_IO_KMEM;
++ io_req.mem.ptr.addr = buffer;
++ io_req.notify.fn = NULL;
++ io_req.client = v->io;
++ io_loc.bdev = v->data_dev->bdev;
++ io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
++ io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
++ r = dm_io(&io_req, 1, &io_loc, NULL);
++ if (unlikely(r))
++ goto free_ret;
++
++ r = verity_hash(v, verity_io_hash_req(v, io), buffer,
++ 1 << v->data_dev_block_bits,
++ verity_io_real_digest(v, io), true);
++ if (unlikely(r))
++ goto free_ret;
++
++ if (memcmp(verity_io_real_digest(v, io),
++ verity_io_want_digest(v, io), v->digest_size)) {
++ r = -EIO;
++ goto free_ret;
++ }
++
++ io->recheck_buffer = buffer;
++ r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
++ if (unlikely(r))
++ goto free_ret;
++
++ r = 0;
++free_ret:
++ mempool_free(page, &v->recheck_pool);
++
++ return r;
++}
++
+ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+ {
+@@ -508,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ {
+ bool is_zero;
+ struct dm_verity *v = io->v;
+-#if defined(CONFIG_DM_VERITY_FEC)
+ struct bvec_iter start;
+-#endif
+ struct bvec_iter iter_copy;
+ struct bvec_iter *iter;
+ struct crypto_wait wait;
+@@ -561,10 +616,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ if (unlikely(r < 0))
+ return r;
+
+-#if defined(CONFIG_DM_VERITY_FEC)
+- if (verity_fec_is_enabled(v))
+- start = *iter;
+-#endif
++ start = *iter;
+ r = verity_for_io_block(v, io, iter, &wait);
+ if (unlikely(r < 0))
+ return r;
+@@ -586,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io)
+ * tasklet since it may sleep, so fallback to work-queue.
+ */
+ return -EAGAIN;
++ } else if (verity_recheck(v, io, start, cur_block) == 0) {
++ if (v->validated_blocks)
++ set_bit(cur_block, v->validated_blocks);
++ continue;
+ #if defined(CONFIG_DM_VERITY_FEC)
+ } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
+ cur_block, NULL, &start) == 0) {
+@@ -941,6 +997,10 @@ static void verity_dtr(struct dm_target *ti)
+ if (v->verify_wq)
+ destroy_workqueue(v->verify_wq);
+
++ mempool_exit(&v->recheck_pool);
++ if (v->io)
++ dm_io_client_destroy(v->io);
++
+ if (v->bufio)
+ dm_bufio_client_destroy(v->bufio);
+
+@@ -1379,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ }
+ v->hash_blocks = hash_position;
+
++ r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
++ if (unlikely(r)) {
++ ti->error = "Cannot allocate mempool";
++ goto bad;
++ }
++
++ v->io = dm_io_client_create();
++ if (IS_ERR(v->io)) {
++ r = PTR_ERR(v->io);
++ v->io = NULL;
++ ti->error = "Cannot allocate dm io";
++ goto bad;
++ }
++
+ v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
+ 1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
+ dm_bufio_alloc_callback, NULL,
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index f3f6070084196..4620a98c99561 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -11,6 +11,7 @@
+ #ifndef DM_VERITY_H
+ #define DM_VERITY_H
+
++#include <linux/dm-io.h>
+ #include <linux/dm-bufio.h>
+ #include <linux/device-mapper.h>
+ #include <linux/interrupt.h>
+@@ -68,6 +69,9 @@ struct dm_verity {
+ unsigned long *validated_blocks; /* bitset blocks validated */
+
+ char *signature_key_desc; /* signature keyring reference */
++
++ struct dm_io_client *io;
++ mempool_t recheck_pool;
+ };
+
+ struct dm_verity_io {
+@@ -84,6 +88,8 @@ struct dm_verity_io {
+
+ struct work_struct work;
+
++ char *recheck_buffer;
++
+ /*
+ * Three variably-size fields follow this struct:
+ *
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 108590041db64..ce0516bdf8fa8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -530,8 +530,12 @@ static void submit_flushes(struct work_struct *ws)
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+- if (atomic_dec_and_test(&mddev->flush_pending))
++ if (atomic_dec_and_test(&mddev->flush_pending)) {
++ /* The pair is percpu_ref_get() from md_flush_request() */
++ percpu_ref_put(&mddev->active_io);
++
+ queue_work(md_wq, &mddev->flush_work);
++ }
+ }
+
+ static void md_submit_flush_data(struct work_struct *ws)
+diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
+index 8aea2d070a40c..d279a4f195e2a 100644
+--- a/drivers/misc/open-dice.c
++++ b/drivers/misc/open-dice.c
+@@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ *drvdata = (struct open_dice_drvdata){
+- .lock = __MUTEX_INITIALIZER(drvdata->lock),
+ .rmem = rmem,
+ .misc = (struct miscdevice){
+ .parent = dev,
+@@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
+ .mode = 0600,
+ },
+ };
++ mutex_init(&drvdata->lock);
+
+ /* Index overflow check not needed, misc_register() will fail. */
+ snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+index 41a6098eb0c2f..4b6bf2764bef7 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+@@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ int j = 0, i;
+
+ for (i = 0; i < NUM_NET_FILTERS; i++) {
+- if (j == *rule_cnt)
+- return -EMSGSIZE;
+-
+ if (!priv->net_filters[i].claimed ||
+ priv->net_filters[i].port != intf->port)
+ continue;
+@@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ priv->net_filters[i - 1].wake_filter)
+ continue;
+
++ if (j == *rule_cnt)
++ return -EMSGSIZE;
++
+ rule_locs[j++] = priv->net_filters[i].fs.location;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+index 53e5428812552..9cae5a3090000 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+@@ -1048,6 +1048,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
+ netdev_err(dev, "could not attach to PHY\n");
+ goto err_phy_disable;
+ }
++
++ /* Indicate that the MAC is responsible for PHY PM */
++ phydev->mac_managed_pm = true;
+ } else if (!intf->wolopts) {
+ ret = phy_resume(dev->phydev);
+ if (ret)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+index 3784347b6fd88..55639c133dd02 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+@@ -437,6 +437,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ return;
+ }
+
++ /* AF modifies given action iff PF/VF has requested for it */
++ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
++ return;
++
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+index dc9af480bfea1..8f116982c08a2 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, sparx5);
+ sparx5->pdev = pdev;
+ sparx5->dev = &pdev->dev;
++ spin_lock_init(&sparx5->tx_lock);
+
+ /* Do switch core reset if available */
+ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+index 6f565c0c0c3dc..316fed5f27355 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+@@ -280,6 +280,7 @@ struct sparx5 {
+ int xtr_irq;
+ /* Frame DMA */
+ int fdma_irq;
++ spinlock_t tx_lock; /* lock for frame transmission */
+ struct sparx5_rx rx;
+ struct sparx5_tx tx;
+ /* PTP */
+diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+index 6db6ac6a3bbc2..ac7e1cffbcecf 100644
+--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
++++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ skb_tx_timestamp(skb);
++ spin_lock(&sparx5->tx_lock);
+ if (sparx5->fdma_irq > 0)
+ ret = sparx5_fdma_xmit(sparx5, ifh, skb);
+ else
+ ret = sparx5_inject(sparx5, ifh, skb, dev);
++ spin_unlock(&sparx5->tx_lock);
+
+ if (ret == -EBUSY)
+ goto busy;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index f1614ad2daaa7..5b3423d1af3f3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5951,11 +5951,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- if (unlikely(!dev)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -5971,11 +5966,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+- if (unlikely(!dev)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -5997,11 +5987,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+
+- if (unlikely(!data)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+@@ -6028,11 +6013,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+
+- if (unlikely(!data)) {
+- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+- return IRQ_NONE;
+- }
+-
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index b1919278e931f..2129ae42c7030 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1907,20 +1907,20 @@ static int __init gtp_init(void)
+ if (err < 0)
+ goto error_out;
+
+- err = genl_register_family(&gtp_genl_family);
++ err = register_pernet_subsys(&gtp_net_ops);
+ if (err < 0)
+ goto unreg_rtnl_link;
+
+- err = register_pernet_subsys(&gtp_net_ops);
++ err = genl_register_family(&gtp_genl_family);
+ if (err < 0)
+- goto unreg_genl_family;
++ goto unreg_pernet_subsys;
+
+ pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
+ sizeof(struct pdp_ctx));
+ return 0;
+
+-unreg_genl_family:
+- genl_unregister_family(&gtp_genl_family);
++unreg_pernet_subsys:
++ unregister_pernet_subsys(&gtp_net_ops);
+ unreg_rtnl_link:
+ rtnl_link_unregister(&gtp_link_ops);
+ error_out:
+diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
+index 4bc05948f772d..a78c692f2d3c5 100644
+--- a/drivers/net/ipa/ipa_interrupt.c
++++ b/drivers/net/ipa/ipa_interrupt.c
+@@ -212,7 +212,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+ u32 unit_count;
+ u32 unit;
+
+- unit_count = roundup(ipa->endpoint_count, 32);
++ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
+diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
+index 894172a3e15fe..337899c69738e 100644
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -421,9 +421,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
+ ERR_PTR(ret));
+ return ret;
+ }
++
++ return genphy_soft_reset(phydev);
+ }
+
+- return genphy_soft_reset(phydev);
++ return 0;
+ }
+
+ static int rtl821x_suspend(struct phy_device *phydev)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index e3120ab893f4e..878d9416a1085 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -668,7 +668,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+@@ -792,7 +791,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+@@ -1003,8 +1001,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
+ if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iftype_data->eht_cap.has_eht) {
+ iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
+- ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
++ ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
+ ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 46cce0ec35e9a..cdb1e706f855e 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
+ static DEFINE_IDA(nvme_fc_local_port_cnt);
+ static DEFINE_IDA(nvme_fc_ctrl_cnt);
+
+-static struct workqueue_struct *nvme_fc_wq;
+-
+-static bool nvme_fc_waiting_to_unload;
+-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
+-
+ /*
+ * These items are short-term. They will eventually be moved into
+ * a generic FC class. See comments in module init.
+@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
+ /* remove from transport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&lport->port_list);
+- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
+- complete(&nvme_fc_unload_proceed);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
+@@ -3893,10 +3886,6 @@ static int __init nvme_fc_init_module(void)
+ {
+ int ret;
+
+- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+- if (!nvme_fc_wq)
+- return -ENOMEM;
+-
+ /*
+ * NOTE:
+ * It is expected that in the future the kernel will combine
+@@ -3914,7 +3903,7 @@ static int __init nvme_fc_init_module(void)
+ ret = class_register(&fc_class);
+ if (ret) {
+ pr_err("couldn't register class fc\n");
+- goto out_destroy_wq;
++ return ret;
+ }
+
+ /*
+@@ -3938,8 +3927,6 @@ static int __init nvme_fc_init_module(void)
+ device_destroy(&fc_class, MKDEV(0, 0));
+ out_destroy_class:
+ class_unregister(&fc_class);
+-out_destroy_wq:
+- destroy_workqueue(nvme_fc_wq);
+
+ return ret;
+ }
+@@ -3959,45 +3946,23 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
+ spin_unlock(&rport->lock);
+ }
+
+-static void
+-nvme_fc_cleanup_for_unload(void)
++static void __exit nvme_fc_exit_module(void)
+ {
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+-
+- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+- list_for_each_entry(rport, &lport->endp_list, endp_list) {
+- nvme_fc_delete_controllers(rport);
+- }
+- }
+-}
+-
+-static void __exit nvme_fc_exit_module(void)
+-{
+ unsigned long flags;
+- bool need_cleanup = false;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+- nvme_fc_waiting_to_unload = true;
+- if (!list_empty(&nvme_fc_lport_list)) {
+- need_cleanup = true;
+- nvme_fc_cleanup_for_unload();
+- }
++ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
++ list_for_each_entry(rport, &lport->endp_list, endp_list)
++ nvme_fc_delete_controllers(rport);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+- if (need_cleanup) {
+- pr_info("%s: waiting for ctlr deletes\n", __func__);
+- wait_for_completion(&nvme_fc_unload_proceed);
+- pr_info("%s: ctrl deletes complete\n", __func__);
+- }
++ flush_workqueue(nvme_delete_wq);
+
+ nvmf_unregister_transport(&nvme_fc_transport);
+
+- ida_destroy(&nvme_fc_local_port_cnt);
+- ida_destroy(&nvme_fc_ctrl_cnt);
+-
+ device_destroy(&fc_class, MKDEV(0, 0));
+ class_unregister(&fc_class);
+- destroy_workqueue(nvme_fc_wq);
+ }
+
+ module_init(nvme_fc_init_module);
+diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
+index 1ab6601fdd5cf..8a02ed63b1566 100644
+--- a/drivers/nvme/target/fc.c
++++ b/drivers/nvme/target/fc.c
+@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
+ struct nvmet_fc_port_entry *pe;
+ struct kref ref;
+ u32 max_sg_cnt;
++
++ struct work_struct put_work;
+ };
+
+ struct nvmet_fc_port_entry {
+@@ -165,7 +167,7 @@ struct nvmet_fc_tgt_assoc {
+ struct nvmet_fc_hostport *hostport;
+ struct nvmet_fc_ls_iod *rcv_disconn;
+ struct list_head a_list;
+- struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
++ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
+ struct kref ref;
+ struct work_struct del_work;
+ struct rcu_head rcu;
+@@ -248,6 +250,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+ static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+ static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
++static void nvmet_fc_put_tgtport_work(struct work_struct *work)
++{
++ struct nvmet_fc_tgtport *tgtport =
++ container_of(work, struct nvmet_fc_tgtport, put_work);
++
++ nvmet_fc_tgtport_put(tgtport);
++}
+ static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_fcp_iod *fod);
+@@ -359,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+
+ if (!lsop->req_queued) {
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+- return;
++ goto out_putwork;
+ }
+
+ list_del(&lsop->lsreq_list);
+@@ -372,7 +381,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
+ (lsreq->rqstlen + lsreq->rsplen),
+ DMA_BIDIRECTIONAL);
+
+- nvmet_fc_tgtport_put(tgtport);
++out_putwork:
++ queue_work(nvmet_wq, &tgtport->put_work);
+ }
+
+ static int
+@@ -801,14 +811,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ if (!queue)
+ return NULL;
+
+- if (!nvmet_fc_tgt_a_get(assoc))
+- goto out_free_queue;
+-
+ queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+ assoc->tgtport->fc_target_port.port_num,
+ assoc->a_id, qid);
+ if (!queue->work_q)
+- goto out_a_put;
++ goto out_free_queue;
+
+ queue->qid = qid;
+ queue->sqsize = sqsize;
+@@ -830,15 +837,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+ goto out_fail_iodlist;
+
+ WARN_ON(assoc->queues[qid]);
+- rcu_assign_pointer(assoc->queues[qid], queue);
++ assoc->queues[qid] = queue;
+
+ return queue;
+
+ out_fail_iodlist:
+ nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+ destroy_workqueue(queue->work_q);
+-out_a_put:
+- nvmet_fc_tgt_a_put(assoc);
+ out_free_queue:
+ kfree(queue);
+ return NULL;
+@@ -851,12 +856,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
+ struct nvmet_fc_tgt_queue *queue =
+ container_of(ref, struct nvmet_fc_tgt_queue, ref);
+
+- rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
+-
+ nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+- nvmet_fc_tgt_a_put(queue->assoc);
+-
+ destroy_workqueue(queue->work_q);
+
+ kfree_rcu(queue, rcu);
+@@ -968,7 +969,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (association_id == assoc->association_id) {
+- queue = rcu_dereference(assoc->queues[qid]);
++ queue = assoc->queues[qid];
+ if (queue &&
+ (!atomic_read(&queue->connected) ||
+ !nvmet_fc_tgt_q_get(queue)))
+@@ -1077,8 +1078,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ /* new allocation not needed */
+ kfree(newhost);
+ newhost = match;
+- /* no new allocation - release reference */
+- nvmet_fc_tgtport_put(tgtport);
+ } else {
+ newhost->tgtport = tgtport;
+ newhost->hosthandle = hosthandle;
+@@ -1093,13 +1092,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ }
+
+ static void
+-nvmet_fc_delete_assoc(struct work_struct *work)
++nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
++{
++ nvmet_fc_delete_target_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
++}
++
++static void
++nvmet_fc_delete_assoc_work(struct work_struct *work)
+ {
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(work, struct nvmet_fc_tgt_assoc, del_work);
++ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+
+- nvmet_fc_delete_target_assoc(assoc);
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_delete_assoc(assoc);
++ nvmet_fc_tgtport_put(tgtport);
++}
++
++static void
++nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
++{
++ nvmet_fc_tgtport_get(assoc->tgtport);
++ queue_work(nvmet_wq, &assoc->del_work);
+ }
+
+ static struct nvmet_fc_tgt_assoc *
+@@ -1111,6 +1125,9 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ int idx;
+ bool needrandom = true;
+
++ if (!tgtport->pe)
++ return NULL;
++
+ assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+ if (!assoc)
+ return NULL;
+@@ -1130,7 +1147,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
+ assoc->a_id = idx;
+ INIT_LIST_HEAD(&assoc->a_list);
+ kref_init(&assoc->ref);
+- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
++ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
+ atomic_set(&assoc->terminating, 0);
+
+ while (needrandom) {
+@@ -1171,13 +1188,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+ struct nvmet_fc_ls_iod *oldls;
+ unsigned long flags;
++ int i;
++
++ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
++ if (assoc->queues[i])
++ nvmet_fc_delete_target_queue(assoc->queues[i]);
++ }
+
+ /* Send Disconnect now that all i/o has completed */
+ nvmet_fc_xmt_disconnect_assoc(assoc);
+
+ nvmet_fc_free_hostport(assoc->hostport);
+ spin_lock_irqsave(&tgtport->lock, flags);
+- list_del_rcu(&assoc->a_list);
+ oldls = assoc->rcv_disconn;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+ /* if pending Rcv Disconnect Association LS, send rsp now */
+@@ -1207,7 +1229,7 @@ static void
+ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ {
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+- struct nvmet_fc_tgt_queue *queue;
++ unsigned long flags;
+ int i, terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+@@ -1216,29 +1238,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+ if (terminating)
+ return;
+
++ spin_lock_irqsave(&tgtport->lock, flags);
++ list_del_rcu(&assoc->a_list);
++ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+- for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+- rcu_read_lock();
+- queue = rcu_dereference(assoc->queues[i]);
+- if (!queue) {
+- rcu_read_unlock();
+- continue;
+- }
++ synchronize_rcu();
+
+- if (!nvmet_fc_tgt_q_get(queue)) {
+- rcu_read_unlock();
+- continue;
+- }
+- rcu_read_unlock();
+- nvmet_fc_delete_target_queue(queue);
+- nvmet_fc_tgt_q_put(queue);
++ /* ensure all in-flight I/Os have been processed */
++ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
++ if (assoc->queues[i])
++ flush_workqueue(assoc->queues[i]->work_q);
+ }
+
+ dev_info(tgtport->dev,
+ "{%d:%d} Association deleted\n",
+ tgtport->fc_target_port.port_num, assoc->a_id);
+-
+- nvmet_fc_tgt_a_put(assoc);
+ }
+
+ static struct nvmet_fc_tgt_assoc *
+@@ -1414,6 +1428,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+ kref_init(&newrec->ref);
+ ida_init(&newrec->assoc_cnt);
+ newrec->max_sg_cnt = template->max_sgl_segments;
++ INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
+
+ ret = nvmet_fc_alloc_ls_iodlist(newrec);
+ if (ret) {
+@@ -1491,9 +1506,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ if (!nvmet_fc_tgt_a_get(assoc))
+ continue;
+- if (!queue_work(nvmet_wq, &assoc->del_work))
+- /* already deleting - release local reference */
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
+ }
+ rcu_read_unlock();
+ }
+@@ -1546,9 +1560,8 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
+ continue;
+ assoc->hostport->invalid = 1;
+ noassoc = false;
+- if (!queue_work(nvmet_wq, &assoc->del_work))
+- /* already deleting - release local reference */
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
+ }
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+@@ -1580,7 +1593,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+- queue = rcu_dereference(assoc->queues[0]);
++ queue = assoc->queues[0];
+ if (queue && queue->nvme_sq.ctrl == ctrl) {
+ if (nvmet_fc_tgt_a_get(assoc))
+ found_ctrl = true;
+@@ -1592,9 +1605,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+ nvmet_fc_tgtport_put(tgtport);
+
+ if (found_ctrl) {
+- if (!queue_work(nvmet_wq, &assoc->del_work))
+- /* already deleting - release local reference */
+- nvmet_fc_tgt_a_put(assoc);
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
+ return;
+ }
+
+@@ -1624,6 +1636,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(tgtport);
+
++ flush_workqueue(nvmet_wq);
++
+ /*
+ * should terminate LS's as well. However, LS's will be generated
+ * at the tail end of association termination, so they likely don't
+@@ -1869,9 +1883,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
+ FCNVME_LS_DISCONNECT_ASSOC);
+
+- /* release get taken in nvmet_fc_find_target_assoc */
+- nvmet_fc_tgt_a_put(assoc);
+-
+ /*
+ * The rules for LS response says the response cannot
+ * go back until ABTS's have been sent for all outstanding
+@@ -1886,8 +1897,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ assoc->rcv_disconn = iod;
+ spin_unlock_irqrestore(&tgtport->lock, flags);
+
+- nvmet_fc_delete_target_assoc(assoc);
+-
+ if (oldls) {
+ dev_info(tgtport->dev,
+ "{%d:%d} Multiple Disconnect Association LS's "
+@@ -1903,6 +1912,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+ nvmet_fc_xmt_ls_rsp(tgtport, oldls);
+ }
+
++ nvmet_fc_schedule_delete_assoc(assoc);
++ nvmet_fc_tgt_a_put(assoc);
++
+ return false;
+ }
+
+@@ -2539,8 +2551,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+
+ fod->req.cmd = &fod->cmdiubuf.sqe;
+ fod->req.cqe = &fod->rspiubuf.cqe;
+- if (tgtport->pe)
+- fod->req.port = tgtport->pe->port;
++ if (!tgtport->pe)
++ goto transport_error;
++ fod->req.port = tgtport->pe->port;
+
+ /* clear any response payload */
+ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+@@ -2901,6 +2914,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
+
+ nvmet_fc_portentry_unbind(pe);
+
++ /* terminate any outstanding associations */
++ __nvmet_fc_free_assocs(pe->tgtport);
++
+ kfree(pe);
+ }
+
+@@ -2932,6 +2948,9 @@ static int __init nvmet_fc_init_module(void)
+
+ static void __exit nvmet_fc_exit_module(void)
+ {
++ /* ensure any shutdown operation, e.g. delete ctrls have finished */
++ flush_workqueue(nvmet_wq);
++
+ /* sanity check - all lports should be removed */
+ if (!list_empty(&nvmet_fc_target_list))
+ pr_warn("%s: targetport list not empty\n", __func__);
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index c65a73433c05f..e6d4226827b52 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
+ if (!rport->targetport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&rport->lock);
+- list_add_tail(&rport->ls_list, &tls_req->ls_list);
++ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ return ret;
+@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+- list_add_tail(&rport->ls_list, &tls_req->ls_list);
++ list_add_tail(&tls_req->ls_list, &rport->ls_list);
+ spin_unlock(&rport->lock);
+ queue_work(nvmet_wq, &rport->ls_work);
+ }
+@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
+ if (!tport->remoteport) {
+ tls_req->status = -ECONNREFUSED;
+ spin_lock(&tport->lock);
+- list_add_tail(&tport->ls_list, &tls_req->ls_list);
++ list_add_tail(&tls_req->ls_list, &tport->ls_list);
+ spin_unlock(&tport->lock);
+ queue_work(nvmet_wq, &tport->ls_work);
+ return ret;
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index a4f802790ca02..8e5d547aa16cb 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1927,6 +1927,7 @@ static void __exit nvmet_tcp_exit(void)
+ flush_workqueue(nvmet_wq);
+
+ destroy_workqueue(nvmet_tcp_wq);
++ ida_destroy(&nvmet_tcp_queue_ida);
+ }
+
+ module_init(nvmet_tcp_init);
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 8d79dd0e1d605..9d1f259fe3573 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -6,6 +6,7 @@
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ */
+
++#include <linux/align.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+
+@@ -598,7 +599,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+ }
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+- msg_addr &= ~aligned_offset;
++ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ epc->mem->window.page_size);
+ if (ret)
+diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
+index c8be056c248de..cfd84a899c82d 100644
+--- a/drivers/pci/msi/irqdomain.c
++++ b/drivers/pci/msi/irqdomain.c
+@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
+
+ return (irq_hw_number_t)desc->msi_index |
+ pci_dev_id(dev) << 11 |
+- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
++ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
+ }
+
+ static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index ab7d7a1235b83..39828eb84e0ba 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -47,6 +47,9 @@
+ /* Message with data needs at least two words (for header & data). */
+ #define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+
++/* Tx timeout in milliseconds. */
++#define TMFIFO_TX_TIMEOUT 2000
++
+ /* ACPI UID for BlueField-3. */
+ #define TMFIFO_BF3_UID 1
+
+@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
+ * @drop_desc: dummy desc for packet dropping
+ * @cur_len: processed length of the current descriptor
+ * @rem_len: remaining length of the pending packet
++ * @rem_padding: remaining bytes to send as paddings
+ * @pkt_len: total length of the pending packet
+ * @next_avail: next avail descriptor id
+ * @num: vring size (number of descriptors)
+ * @align: vring alignment size
+ * @index: vring index
+ * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
++ * @tx_timeout: expire time of last tx packet
+ * @fifo: pointer to the tmfifo structure
+ */
+ struct mlxbf_tmfifo_vring {
+@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
+ struct vring_desc drop_desc;
+ int cur_len;
+ int rem_len;
++ int rem_padding;
+ u32 pkt_len;
+ u16 next_avail;
+ int num;
+ int align;
+ int index;
+ int vdev_id;
++ unsigned long tx_timeout;
+ struct mlxbf_tmfifo *fifo;
+ };
+
+@@ -819,6 +826,50 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ return true;
+ }
+
++static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
++{
++ unsigned long flags;
++
++ /* Only handle Tx timeout for network vdev. */
++ if (vring->vdev_id != VIRTIO_ID_NET)
++ return;
++
++ /* Initialize the timeout or return if not expired. */
++ if (!vring->tx_timeout) {
++ /* Initialize the timeout. */
++ vring->tx_timeout = jiffies +
++ msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
++ return;
++ } else if (time_before(jiffies, vring->tx_timeout)) {
++ /* Return if not timeout yet. */
++ return;
++ }
++
++ /*
++	 * Drop the packet after the timeout. The outstanding packet is
++	 * released and the remaining bytes are sent as 0x00 padding bytes
++	 * for recovery. On the peer (host) side, the padding bytes are either
++	 * dropped directly, or appended to an existing outstanding packet and
++	 * thus dropped as a corrupted network packet.
++ */
++ vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
++ mlxbf_tmfifo_release_pkt(vring);
++ vring->cur_len = 0;
++ vring->rem_len = 0;
++ vring->fifo->vring[0] = NULL;
++
++ /*
++ * Make sure the load/store are in order before
++ * returning back to virtio.
++ */
++ virtio_mb(false);
++
++ /* Notify upper layer. */
++ spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
++ vring_interrupt(0, vring->vq);
++ spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
++}
++
+ /* Rx & Tx processing of a queue. */
+ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ {
+@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ return;
+
+ do {
++retry:
+ /* Get available FIFO space. */
+ if (avail == 0) {
+ if (is_rx)
+@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ break;
+ }
+
++ /* Insert paddings for discarded Tx packet. */
++ if (!is_rx) {
++ vring->tx_timeout = 0;
++ while (vring->rem_padding >= sizeof(u64)) {
++ writeq(0, vring->fifo->tx.data);
++ vring->rem_padding -= sizeof(u64);
++ if (--avail == 0)
++ goto retry;
++ }
++ }
++
+ /* Console output always comes from the Tx buffer. */
+ if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
+ mlxbf_tmfifo_console_tx(fifo, avail);
+@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
+ /* Handle one descriptor. */
+ more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
+ } while (more);
++
++ /* Check Tx timeout. */
++ if (avail <= 0 && !is_rx)
++ mlxbf_tmfifo_check_tx_timeout(vring);
+ }
+
+ /* Handle Rx or Tx queues. */
+diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
+index 210b0a81b7ecb..084c355c86f5f 100644
+--- a/drivers/platform/x86/intel/vbtn.c
++++ b/drivers/platform/x86/intel/vbtn.c
+@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
+ autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
+
+ sparse_keymap_report_event(input_dev, event, val, autorelease);
+-
+- /* Some devices need this to report further events */
+- acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ }
+
+ /*
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index d73cbae4aa218..89c37a83d7fcd 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10308,6 +10308,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ return 0;
+ default:
+ /* Unknown function */
++ pr_debug("unknown function 0x%x\n", funcmode);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+@@ -10493,8 +10494,8 @@ static void dytc_profile_refresh(void)
+ return;
+
+ perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
+- convert_dytc_to_profile(funcmode, perfmode, &profile);
+- if (profile != dytc_current_profile) {
++ err = convert_dytc_to_profile(funcmode, perfmode, &profile);
++ if (!err && profile != dytc_current_profile) {
+ dytc_current_profile = profile;
+ platform_profile_notify();
+ }
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 0c67337726984..969477c83e56e 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
+ };
+
+ static const struct ts_dmi_data chuwi_hi8_air_data = {
+- .acpi_name = "MSSL1680:00",
++ .acpi_name = "MSSL1680",
+ .properties = chuwi_hi8_air_props,
+ };
+
+@@ -944,6 +944,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
+ .properties = teclast_tbook11_props,
+ };
+
++static const struct property_entry teclast_x16_plus_props[] = {
++ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
++ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
++ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
++ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
++ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
++ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
++ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++ PROPERTY_ENTRY_BOOL("silead,home-button"),
++ { }
++};
++
++static const struct ts_dmi_data teclast_x16_plus_data = {
++ .embedded_fw = {
++ .name = "silead/gsl3692-teclast-x16-plus.fw",
++ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
++ .length = 43560,
++ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
++ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
++ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
++ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
++ },
++ .acpi_name = "MSSL1680:00",
++ .properties = teclast_x16_plus_props,
++};
++
+ static const struct property_entry teclast_x3_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+@@ -1612,6 +1638,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
+ },
+ },
++ {
++ /* Teclast X16 Plus */
++ .driver_data = (void *)&teclast_x16_plus_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
++ DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
++ },
++ },
+ {
+ /* Teclast X3 Plus */
+ .driver_data = (void *)&teclast_x3_plus_data,
+@@ -1786,7 +1821,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
+ int error;
+
+ if (has_acpi_companion(dev) &&
+- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
++ strstarts(client->name, ts_data->acpi_name)) {
+ error = device_create_managed_software_node(dev, ts_data->properties, NULL);
+ if (error)
+ dev_err(dev, "failed to add properties: %d\n", error);
+diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
+index b56a174cde3df..5c2d49ae332fb 100644
+--- a/drivers/regulator/max5970-regulator.c
++++ b/drivers/regulator/max5970-regulator.c
+@@ -265,7 +265,7 @@ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
+ return ret;
+
+ if (*val)
+- return regmap_write(map, reg, *val);
++ return regmap_write(map, reg, 0);
+
+ return 0;
+ }
+diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
+index 2aff6db748e2c..e33d10df7a763 100644
+--- a/drivers/regulator/pwm-regulator.c
++++ b/drivers/regulator/pwm-regulator.c
+@@ -158,6 +158,9 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
+ pwm_get_state(drvdata->pwm, &pstate);
+
+ voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
++ if (voltage < min(max_uV_duty, min_uV_duty) ||
++ voltage > max(max_uV_duty, min_uV_duty))
++ return -ENOTRECOVERABLE;
+
+ /*
+ * The dutycycle for min_uV might be greater than the one for max_uV.
+diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
+index c533d1dadc6bb..a5dba3829769c 100644
+--- a/drivers/s390/cio/device_ops.c
++++ b/drivers/s390/cio/device_ops.c
+@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ return -EINVAL;
+ if (cdev->private->state == DEV_STATE_NOT_OPER)
+ return -ENODEV;
+- if (cdev->private->state == DEV_STATE_VERIFY) {
++ if (cdev->private->state == DEV_STATE_VERIFY ||
++ cdev->private->flags.doverify) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
+@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+ }
+ if (cdev->private->state != DEV_STATE_ONLINE ||
+ ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
+- !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
+- cdev->private->flags.doverify)
++ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
+ return -EBUSY;
+ ret = cio_set_options (sch, flags);
+ if (ret)
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 695a57d894cdd..23bce8995a557 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1285,7 +1285,7 @@ source "drivers/scsi/arm/Kconfig"
+
+ config JAZZ_ESP
+ bool "MIPS JAZZ FAS216 SCSI support"
+- depends on MACH_JAZZ && SCSI
++ depends on MACH_JAZZ && SCSI=y
+ select SCSI_SPI_ATTRS
+ help
+ This is the driver for the onboard SCSI host adapter of MIPS Magnum
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index d26941b131fdb..bf879d81846b6 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -1918,7 +1918,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+-static int
++static uint32_t
+ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datasegcnt,
+ struct lpfc_io_buf *lpfc_cmd)
+@@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct sli4_sge_diseed *diseed = NULL;
+ dma_addr_t physaddr;
+- int i = 0, num_sge = 0, status;
+- uint32_t reftag;
++ int i = 0, status;
++ uint32_t reftag, num_sge = 0;
+ uint8_t txop, rxop;
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t rc;
+@@ -2099,7 +2099,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+-static int
++static uint32_t
+ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct sli4_sge *sgl, int datacnt, int protcnt,
+ struct lpfc_io_buf *lpfc_cmd)
+@@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ uint32_t rc;
+ #endif
+ uint32_t checking = 1;
+- uint32_t dma_offset = 0;
+- int num_sge = 0, j = 2;
++ uint32_t dma_offset = 0, num_sge = 0;
++ int j = 2;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
+
+ sgpe = scsi_prot_sglist(sc);
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 89367c4bf0ef5..bd66612c0a507 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ return result + 4;
+ }
+
++enum scsi_vpd_parameters {
++ SCSI_VPD_HEADER_SIZE = 4,
++ SCSI_VPD_LIST_SIZE = 36,
++};
++
+ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ {
+- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
++ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
+ int result;
+
+ if (sdev->no_vpd_size)
+ return SCSI_DEFAULT_VPD_LEN;
+
++ /*
++ * Fetch the supported pages VPD and validate that the requested page
++ * number is present.
++ */
++ if (page != 0) {
++ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
++ if (result < SCSI_VPD_HEADER_SIZE)
++ return 0;
++
++ result -= SCSI_VPD_HEADER_SIZE;
++ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
++ return 0;
++ }
+ /*
+ * Fetch the VPD page header to find out how big the page
+ * is. This is done to prevent problems on legacy devices
+ * which can not handle allocation lengths as large as
+ * potentially requested by the caller.
+ */
+- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
++ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
+ if (result < 0)
+ return 0;
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index c2e8d9e27749b..1335119a49933 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3404,6 +3404,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
+ return true;
+ }
+
++static void sd_read_block_zero(struct scsi_disk *sdkp)
++{
++ unsigned int buf_len = sdkp->device->sector_size;
++ char *buffer, cmd[10] = { };
++
++ buffer = kmalloc(buf_len, GFP_KERNEL);
++ if (!buffer)
++ return;
++
++ cmd[0] = READ_10;
++ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
++ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
++
++ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
++ SD_TIMEOUT, sdkp->max_retries, NULL);
++ kfree(buffer);
++}
++
+ /**
+ * sd_revalidate_disk - called the first time a new disk is seen,
+ * performs disk spin up, read_capacity, etc.
+@@ -3443,7 +3461,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ */
+ if (sdkp->media_present) {
+ sd_read_capacity(sdkp, buffer);
+-
++ /*
++ * Some USB/UAS devices return generic values for mode pages
++ * until the media has been accessed. Trigger a READ operation
++ * to force the device to populate mode pages.
++ */
++ if (sdp->read_before_ms)
++ sd_read_block_zero(sdkp);
+ /*
+ * set the default to rotational. All non-rotational devices
+ * support the block characteristics VPD page, which will
+diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
+index 0419401835169..cdedc271857aa 100644
+--- a/drivers/scsi/smartpqi/smartpqi.h
++++ b/drivers/scsi/smartpqi/smartpqi.h
+@@ -1347,7 +1347,6 @@ struct pqi_ctrl_info {
+ bool controller_online;
+ bool block_requests;
+ bool scan_blocked;
+- u8 logical_volume_rescan_needed : 1;
+ u8 inbound_spanning_supported : 1;
+ u8 outbound_spanning_supported : 1;
+ u8 pqi_mode_enabled : 1;
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 9a58df9312fa7..868453b18c9ae 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -2093,8 +2093,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+- if (ctrl_info->logical_volume_rescan_needed)
+- existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+@@ -2164,6 +2162,20 @@ static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
+ INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
+ }
+
++static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
++{
++ if (pqi_device_in_remove(device))
++ return false;
++
++ if (device->sdev == NULL)
++ return false;
++
++ if (!scsi_device_online(device->sdev))
++ return false;
++
++ return device->rescan;
++}
++
+ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
+ {
+@@ -2284,9 +2296,13 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
+- if (device->rescan) {
+- scsi_rescan_device(device->sdev);
++ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
++ if (pqi_volume_rescan_needed(device)) {
+ device->rescan = false;
++ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
++ scsi_rescan_device(device->sdev);
++ } else {
++ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ }
+ }
+ }
+@@ -2308,8 +2324,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+ }
+ }
+
+- ctrl_info->logical_volume_rescan_needed = false;
+-
+ }
+
+ static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+@@ -3702,6 +3716,21 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+ return ack_event;
+ }
+
++static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
++{
++ unsigned long flags;
++ struct pqi_scsi_dev *device;
++
++ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
++
++ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
++ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
++ device->rescan = true;
++ }
++
++ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
++}
++
+ static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+ {
+ unsigned long flags;
+@@ -3742,7 +3771,7 @@ static void pqi_event_worker(struct work_struct *work)
+ ack_event = true;
+ rescan_needed = true;
+ if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
+- ctrl_info->logical_volume_rescan_needed = true;
++ pqi_mark_volumes_for_rescan(ctrl_info);
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
+ }
+@@ -6504,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
+ {
+ struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
++ if (!ctrl_info->disable_managed_interrupts)
++ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0);
++ else
++ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ }
+
+ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
+@@ -10142,6 +10174,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1014, 0x0718)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1137, 0x02f8)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1137, 0x02f9)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1137, 0x02fa)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1000)
+@@ -10198,6 +10242,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100a)
+ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x100e)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x100f)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1010)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1011)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1043)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1044)
++ },
++ {
++ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
++ 0x1f51, 0x1045)
++ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_ANY_ID, PCI_ANY_ID)
+diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
+index d239fc5a49ccc..c1556b6529092 100644
+--- a/drivers/spi/spi-cs42l43.c
++++ b/drivers/spi/spi-cs42l43.c
+@@ -244,7 +244,10 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
+ priv->ctlr->use_gpio_descriptors = true;
+ priv->ctlr->auto_runtime_pm = true;
+
+- devm_pm_runtime_enable(priv->dev);
++ ret = devm_pm_runtime_enable(priv->dev);
++ if (ret)
++ return ret;
++
+ pm_runtime_idle(priv->dev);
+
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
+diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
+index 9d22018f7985f..1301d14483d48 100644
+--- a/drivers/spi/spi-hisi-sfc-v3xx.c
++++ b/drivers/spi/spi-hisi-sfc-v3xx.c
+@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
+ static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
+ {
+ struct hisi_sfc_v3xx_host *host = data;
++ u32 reg;
++
++ reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
++ if (!reg)
++ return IRQ_NONE;
+
+ hisi_sfc_v3xx_disable_int(host);
+
+diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
+index b9918dcc38027..07d20ca1164c3 100644
+--- a/drivers/spi/spi-intel-pci.c
++++ b/drivers/spi/spi-intel-pci.c
+@@ -76,6 +76,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
++ { PCI_VDEVICE(INTEL, 0x7f24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index cfc3b1ddbd229..6f12e4fb2e2e1 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -136,14 +136,14 @@ struct sh_msiof_spi_priv {
+
+ /* SIFCTR */
+ #define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
+-#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
+-#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
+-#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
+-#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
+-#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
+-#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
+-#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
+-#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
++#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */
++#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */
++#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */
++#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */
++#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */
++#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */
++#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */
++#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */
+ #define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
+ #define SIFCTR_TFUA_SHIFT 20
+ #define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index b6523d4b9259e..86590a7e29f6a 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -147,7 +147,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
+ struct se_session *se_sess = se_cmd->se_sess;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+- unsigned long flags;
+
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
+@@ -178,10 +177,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
+ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+
+- spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+- list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+- spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(transport_lookup_tmr_lun);
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 0d4f09693ef46..da59c1ac2f2e6 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -907,12 +907,15 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+
+ return 0;
+ fail:
+- if (bio)
+- bio_put(bio);
++ if (bio) {
++ bio_uninit(bio);
++ kfree(bio);
++ }
+ while (req->bio) {
+ bio = req->bio;
+ req->bio = bio->bi_next;
+- bio_put(bio);
++ bio_uninit(bio);
++ kfree(bio);
+ }
+ req->biotail = NULL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0686882bcbda3..fb93d74c5d0b2 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3627,6 +3627,10 @@ int transport_generic_handle_tmr(
+ unsigned long flags;
+ bool aborted = false;
+
++ spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
++ list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
++ spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
++
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ aborted = true;
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index cd3913b933c76..362bbcdece0d7 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1345,11 +1345,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
+ }
+ }
+
++static void pl011_rs485_tx_start(struct uart_amba_port *uap)
++{
++ struct uart_port *port = &uap->port;
++ u32 cr;
++
++ /* Enable transmitter */
++ cr = pl011_read(uap, REG_CR);
++ cr |= UART011_CR_TXE;
++
++ /* Disable receiver if half-duplex */
++ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
++ cr &= ~UART011_CR_RXE;
++
++ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
++ cr &= ~UART011_CR_RTS;
++ else
++ cr |= UART011_CR_RTS;
++
++ pl011_write(cr, uap, REG_CR);
++
++ if (port->rs485.delay_rts_before_send)
++ mdelay(port->rs485.delay_rts_before_send);
++
++ uap->rs485_tx_started = true;
++}
++
+ static void pl011_start_tx(struct uart_port *port)
+ {
+ struct uart_amba_port *uap =
+ container_of(port, struct uart_amba_port, port);
+
++ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
++ !uap->rs485_tx_started)
++ pl011_rs485_tx_start(uap);
++
+ if (!pl011_dma_tx_start(uap))
+ pl011_start_tx_pio(uap);
+ }
+@@ -1431,42 +1461,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
+ return true;
+ }
+
+-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+-{
+- struct uart_port *port = &uap->port;
+- u32 cr;
+-
+- /* Enable transmitter */
+- cr = pl011_read(uap, REG_CR);
+- cr |= UART011_CR_TXE;
+-
+- /* Disable receiver if half-duplex */
+- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+- cr &= ~UART011_CR_RXE;
+-
+- if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+- cr &= ~UART011_CR_RTS;
+- else
+- cr |= UART011_CR_RTS;
+-
+- pl011_write(cr, uap, REG_CR);
+-
+- if (port->rs485.delay_rts_before_send)
+- mdelay(port->rs485.delay_rts_before_send);
+-
+- uap->rs485_tx_started = true;
+-}
+-
+ /* Returns true if tx interrupts have to be (kept) enabled */
+ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+ {
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ int count = uap->fifosize >> 1;
+
+- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+- !uap->rs485_tx_started)
+- pl011_rs485_tx_start(uap);
+-
+ if (uap->port.x_char) {
+ if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
+ return true;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index b6f4f436a5653..e5f933beb6c05 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
+ writel_relaxed(cr3, port->membase + ofs->cr3);
+ writel_relaxed(cr1, port->membase + ofs->cr1);
+
+- rs485conf->flags |= SER_RS485_RX_DURING_TX;
++ if (!port->rs485_rx_during_tx_gpio)
++ rs485conf->flags |= SER_RS485_RX_DURING_TX;
++
+ } else {
+ stm32_usart_clr_bits(port, ofs->cr3,
+ USART_CR3_DEM | USART_CR3_DEP);
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 44e0437bd19d9..ee9119b708f01 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -2949,7 +2949,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
+ */
+ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
+ {
+- u32 mask = 1U << task_tag;
++ u32 mask;
+ unsigned long flags;
+ int err;
+
+@@ -2967,6 +2967,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
+ return 0;
+ }
+
++ mask = 1U << task_tag;
++
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, mask);
+@@ -6251,7 +6253,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+ ufshcd_hold(hba);
+ if (!ufshcd_is_clkgating_allowed(hba))
+ ufshcd_setup_clocks(hba, true);
+- ufshcd_release(hba);
+ pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
+ ufshcd_vops_resume(hba, pm_op);
+ } else {
+diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
+index d140010257004..b1b46c7c63f8b 100644
+--- a/drivers/usb/cdns3/cdns3-gadget.c
++++ b/drivers/usb/cdns3/cdns3-gadget.c
+@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
+ return;
+ }
+
+- if (request->complete) {
++ /*
++	 * The zlp request is appended by the driver, so there is no need to call
++	 * usb_gadget_giveback_request() to notify the gadget composite driver.
++ */
++ if (request->complete && request->buf != priv_dev->zlp_buf) {
+ spin_unlock(&priv_dev->lock);
+ usb_gadget_giveback_request(&priv_ep->endpoint,
+ request);
+@@ -2539,11 +2543,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
+
+ while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
+ priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
++ list_del_init(&priv_req->list);
+
+ kfree(priv_req->request.buf);
+ cdns3_gadget_ep_free_request(&priv_ep->endpoint,
+ &priv_req->request);
+- list_del_init(&priv_req->list);
+ --priv_ep->wa2_counter;
+ }
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 33548771a0d3a..465e9267b49c1 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -395,7 +395,6 @@ static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
+ return ret;
+ }
+
+-
+ /**
+ * cdns_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3/cdnsp core device
+diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
+index 04b6d12f2b9a3..ee917f1b091c8 100644
+--- a/drivers/usb/cdns3/drd.c
++++ b/drivers/usb/cdns3/drd.c
+@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
+ */
+ static void cdns_otg_disable_irq(struct cdns *cdns)
+ {
+- writel(0, &cdns->otg_irq_regs->ien);
++ if (cdns->version)
++ writel(0, &cdns->otg_irq_regs->ien);
+ }
+
+ /**
+@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
+
+ cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
+
+- if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
++ state = readl(&cdns->otg_cdnsp_regs->did);
++
++ if (OTG_CDNSP_CHECK_DID(state)) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_cdnsp_regs->ien;
+ cdns->version = CDNSP_CONTROLLER_V2;
+- } else {
++ } else if (OTG_CDNS3_CHECK_DID(state)) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v1_regs->ien;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->version = CDNS3_CONTROLLER_V1;
++ } else {
++		dev_err(cdns->dev, "not supported DID=0x%08x\n", state);
++ return -EINVAL;
+ }
+
+ dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
+ return 0;
+ }
+
+-
+ /* Indicate the cdns3 core was power lost before */
+ bool cdns_power_is_lost(struct cdns *cdns)
+ {
+diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
+index cbdf94f73ed91..d72370c321d39 100644
+--- a/drivers/usb/cdns3/drd.h
++++ b/drivers/usb/cdns3/drd.h
+@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
+ __le32 susp_timing_ctrl;
+ };
+
+-#define OTG_CDNSP_DID 0x0004034E
++/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
++#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
++
++/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
++#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
+
+ /*
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
+diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
+index 6164fc4c96a49..ceca4d839dfd4 100644
+--- a/drivers/usb/cdns3/host.c
++++ b/drivers/usb/cdns3/host.c
+@@ -18,6 +18,11 @@
+ #include "../host/xhci.h"
+ #include "../host/xhci-plat.h"
+
++/*
++ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
++ * in the Cadence USB3 dual-role controller, so they can't be used
++ * with the Cadence CDNSP dual-role controller.
++ */
+ #define XECP_PORT_CAP_REG 0x8000
+ #define XECP_AUX_CTRL_REG1 0x8120
+
+@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
+ .resume_quirk = xhci_cdns3_resume_quirk,
+ };
+
++static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
++
+ static int __cdns_host_init(struct cdns *cdns)
+ {
+ struct platform_device *xhci;
+@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
+ goto err1;
+ }
+
+- cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+- sizeof(struct xhci_plat_priv), GFP_KERNEL);
++ if (cdns->version < CDNSP_CONTROLLER_V2)
++ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
++ sizeof(struct xhci_plat_priv), GFP_KERNEL);
++ else
++ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
++ sizeof(struct xhci_plat_priv), GFP_KERNEL);
++
+ if (!cdns->xhci_plat_data) {
+ ret = -ENOMEM;
+ goto err1;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 4c8dd67246788..28f49400f3e8b 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2650,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
++ if (!dwc->pullups_connected) {
++ spin_unlock_irqrestore(&dwc->lock, flags);
++ return 0;
++ }
++
+ dwc->connected = false;
+
+ /*
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index cc0ed29a4adc0..5712883a7527c 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1325,7 +1325,15 @@ static int ncm_unwrap_ntb(struct gether *port,
+ "Parsed NTB with %d frames\n", dgram_counter);
+
+ to_process -= block_len;
+- if (to_process != 0) {
++
++ /*
++ * Windows NCM driver avoids USB ZLPs by adding a 1-byte
++ * zero pad as needed.
++ */
++ if (to_process == 1 &&
++ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
++ to_process--;
++ } else if (to_process > 0) {
+ ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+ goto parse_ntb;
+ }
+diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
+index 10c5d7f726a1f..f90eeecf27de1 100644
+--- a/drivers/usb/gadget/udc/omap_udc.c
++++ b/drivers/usb/gadget/udc/omap_udc.c
+@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
+
+ static inline int machine_without_vbus_sense(void)
+ {
+- return machine_is_omap_osk() || machine_is_sx1();
++ return machine_is_omap_osk() || machine_is_omap_palmte() ||
++ machine_is_sx1();
+ }
+
+ static int omap_udc_start(struct usb_gadget *g,
+diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
+index ae41578bd0149..70165dd86b5de 100644
+--- a/drivers/usb/roles/class.c
++++ b/drivers/usb/roles/class.c
+@@ -21,7 +21,9 @@ static const struct class role_class = {
+ struct usb_role_switch {
+ struct device dev;
+ struct mutex lock; /* device lock*/
++ struct module *module; /* the module this device depends on */
+ enum usb_role role;
++ bool registered;
+
+ /* From descriptor */
+ struct device *usb2_port;
+@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
+ if (IS_ERR_OR_NULL(sw))
+ return 0;
+
++ if (!sw->registered)
++ return -EOPNOTSUPP;
++
+ mutex_lock(&sw->lock);
+
+ ret = sw->set(sw, role);
+@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
+ {
+ enum usb_role role;
+
+- if (IS_ERR_OR_NULL(sw))
++ if (IS_ERR_OR_NULL(sw) || !sw->registered)
+ return USB_ROLE_NONE;
+
+ mutex_lock(&sw->lock);
+@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
+ usb_role_switch_match);
+
+ if (!IS_ERR_OR_NULL(sw))
+- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
++ WARN_ON(!try_module_get(sw->module));
+
+ return sw;
+ }
+@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
+ sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
+ NULL, usb_role_switch_match);
+ if (!IS_ERR_OR_NULL(sw))
+- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
++ WARN_ON(!try_module_get(sw->module));
+
+ return sw;
+ }
+@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
+ void usb_role_switch_put(struct usb_role_switch *sw)
+ {
+ if (!IS_ERR_OR_NULL(sw)) {
+- module_put(sw->dev.parent->driver->owner);
++ module_put(sw->module);
+ put_device(&sw->dev);
+ }
+ }
+@@ -189,15 +194,18 @@ struct usb_role_switch *
+ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+ {
+ struct device *dev;
++ struct usb_role_switch *sw = NULL;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = class_find_device_by_fwnode(&role_class, fwnode);
+- if (dev)
+- WARN_ON(!try_module_get(dev->parent->driver->owner));
++ if (dev) {
++ sw = to_role_switch(dev);
++ WARN_ON(!try_module_get(sw->module));
++ }
+
+- return dev ? to_role_switch(dev) : NULL;
++ return sw;
+ }
+ EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
+
+@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
+ sw->set = desc->set;
+ sw->get = desc->get;
+
++ sw->module = parent->driver->owner;
+ sw->dev.parent = parent;
+ sw->dev.fwnode = desc->fwnode;
+ sw->dev.class = &role_class;
+@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
+ return ERR_PTR(ret);
+ }
+
++ sw->registered = true;
++
+ /* TODO: Symlinks for the host port and the device controller. */
+
+ return sw;
+@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
+ */
+ void usb_role_switch_unregister(struct usb_role_switch *sw)
+ {
+- if (!IS_ERR_OR_NULL(sw))
++ if (!IS_ERR_OR_NULL(sw)) {
++ sw->registered = false;
+ device_unregister(&sw->dev);
++ }
+ }
+ EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
+
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index c54e9805da536..12cf9940e5b67 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -179,6 +179,13 @@ static int slave_configure(struct scsi_device *sdev)
+ */
+ sdev->use_192_bytes_for_3f = 1;
+
++ /*
++ * Some devices report generic values until the media has been
++ * accessed. Force a READ(10) prior to querying device
++ * characteristics.
++ */
++ sdev->read_before_ms = 1;
++
+ /*
+ * Some devices don't like MODE SENSE with page=0x3f,
+ * which is the command used for checking if a device
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 2583ee9815c55..18d6d6e2b44bc 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -878,6 +878,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
++ /*
++ * Some devices report generic values until the media has been
++ * accessed. Force a READ(10) prior to querying device
++ * characteristics.
++ */
++ sdev->read_before_ms = 1;
++
+ /*
+ * Some devices don't like MODE SENSE with page=0x3f,
+ * which is the command used for checking if a device
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 6d455ca76125e..53c60d93bbeb0 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -3730,9 +3730,6 @@ static void tcpm_detach(struct tcpm_port *port)
+ if (tcpm_port_is_disconnected(port))
+ port->hard_reset_count = 0;
+
+- port->try_src_count = 0;
+- port->try_snk_count = 0;
+-
+ if (!port->attached)
+ return;
+
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index fa222080887d5..928eacbeb21ac 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -25,6 +25,8 @@ struct ucsi_acpi {
+ unsigned long flags;
+ guid_t guid;
+ u64 cmd;
++ bool dell_quirk_probed;
++ bool dell_quirk_active;
+ };
+
+ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
+@@ -126,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
+ .async_write = ucsi_acpi_async_write
+ };
+
+-static const struct dmi_system_id zenbook_dmi_id[] = {
++/*
++ * Some Dell laptops expect that an ACK command with the
++ * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
++ * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
++ * If this is not done, events are not delivered to OSPM and
++ * subsequent commands will time out.
++ */
++static int
++ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
++ const void *val, size_t val_len)
++{
++ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
++ u64 cmd = *(u64 *)val, ack = 0;
++ int ret;
++
++ if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
++ cmd & UCSI_ACK_CONNECTOR_CHANGE)
++ ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
++
++ ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
++ if (ret != 0)
++ return ret;
++ if (ack == 0)
++ return ret;
++
++ if (!ua->dell_quirk_probed) {
++ ua->dell_quirk_probed = true;
++
++ cmd = UCSI_GET_CAPABILITY;
++ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
++ sizeof(cmd));
++ if (ret == 0)
++ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
++ &ack, sizeof(ack));
++ if (ret != -ETIMEDOUT)
++ return ret;
++
++ ua->dell_quirk_active = true;
++ dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
++ dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
++ }
++
++ if (!ua->dell_quirk_active)
++ return ret;
++
++ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
++}
++
++static const struct ucsi_operations ucsi_dell_ops = {
++ .read = ucsi_acpi_read,
++ .sync_write = ucsi_dell_sync_write,
++ .async_write = ucsi_acpi_async_write
++};
++
++static const struct dmi_system_id ucsi_acpi_quirks[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
++ .driver_data = (void *)&ucsi_zenbook_ops,
++ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ },
++ .driver_data = (void *)&ucsi_dell_ops,
+ },
+ { }
+ };
+@@ -160,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ {
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ const struct ucsi_operations *ops = &ucsi_acpi_ops;
++ const struct dmi_system_id *id;
+ struct ucsi_acpi *ua;
+ struct resource *res;
+ acpi_status status;
+@@ -189,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
+ init_completion(&ua->complete);
+ ua->dev = &pdev->dev;
+
+- if (dmi_check_system(zenbook_dmi_id))
+- ops = &ucsi_zenbook_ops;
++ id = dmi_first_match(ucsi_acpi_quirks);
++ if (id)
++ ops = id->driver_data;
+
+ ua->ucsi = ucsi_create(&pdev->dev, ops);
+ if (IS_ERR(ua->ucsi))
+diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
+index 0848f920efb7c..7af5b204990bb 100644
+--- a/drivers/vfio/iova_bitmap.c
++++ b/drivers/vfio/iova_bitmap.c
+@@ -100,7 +100,7 @@ struct iova_bitmap {
+ struct iova_bitmap_map mapped;
+
+ /* userspace address of the bitmap */
+- u64 __user *bitmap;
++ u8 __user *bitmap;
+
+ /* u64 index that @mapped points to */
+ unsigned long mapped_base_index;
+@@ -162,7 +162,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
+ {
+ struct iova_bitmap_map *mapped = &bitmap->mapped;
+ unsigned long npages;
+- u64 __user *addr;
++ u8 __user *addr;
+ long ret;
+
+ /*
+@@ -175,18 +175,19 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
+ bitmap->mapped_base_index) *
+ sizeof(*bitmap->bitmap), PAGE_SIZE);
+
+- /*
+- * We always cap at max number of 'struct page' a base page can fit.
+- * This is, for example, on x86 means 2M of bitmap data max.
+- */
+- npages = min(npages, PAGE_SIZE / sizeof(struct page *));
+-
+ /*
+ * Bitmap address to be pinned is calculated via pointer arithmetic
+ * with bitmap u64 word index.
+ */
+ addr = bitmap->bitmap + bitmap->mapped_base_index;
+
++ /*
++ * We always cap at max number of 'struct page' a base page can fit.
++ * This is, for example, on x86 means 2M of bitmap data max.
++ */
++ npages = min(npages + !!offset_in_page(addr),
++ PAGE_SIZE / sizeof(struct page *));
++
+ ret = pin_user_pages_fast((unsigned long)addr, npages,
+ FOLL_WRITE, mapped->pages);
+ if (ret <= 0)
+@@ -247,7 +248,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+
+ mapped = &bitmap->mapped;
+ mapped->pgshift = __ffs(page_size);
+- bitmap->bitmap = data;
++ bitmap->bitmap = (u8 __user *)data;
+ bitmap->mapped_total_index =
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ bitmap->iova = iova;
+@@ -302,7 +303,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+
+ remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+ remaining = min_t(unsigned long, remaining,
+- bytes / sizeof(*bitmap->bitmap));
++ DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
+
+ return remaining;
+ }
+@@ -406,6 +407,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
+ mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
++ unsigned long last_page_idx = mapped->npages - 1;
+
+ do {
+ unsigned int page_idx = cur_bit / BITS_PER_PAGE;
+@@ -414,6 +416,9 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
+ last_bit - cur_bit + 1);
+ void *kaddr;
+
++ if (unlikely(page_idx > last_page_idx))
++ break;
++
+ kaddr = kmap_local_page(mapped->pages[page_idx]);
+ bitmap_set(kaddr, offset, nbits);
+ kunmap_local(kaddr);
+diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
+index b5f84bd4804b8..4ba5cd55e5a52 100644
+--- a/drivers/video/fbdev/savage/savagefb_driver.c
++++ b/drivers/video/fbdev/savage/savagefb_driver.c
+@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,
+
+ DBG("savagefb_check_var");
+
++ if (!var->pixclock)
++ return -EINVAL;
++
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ switch (var->bits_per_pixel) {
+diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
+index 0f5374f6ef055..6d524a65af181 100644
+--- a/drivers/video/fbdev/sis/sis_main.c
++++ b/drivers/video/fbdev/sis/sis_main.c
+@@ -1475,6 +1475,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+
+ vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+
++ if (!var->pixclock)
++ return -EINVAL;
+ pixclock = var->pixclock;
+
+ if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
+diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
+index b8f2f971c2f0f..e3585330cf98b 100644
+--- a/drivers/xen/events/events_2l.c
++++ b/drivers/xen/events/events_2l.c
+@@ -171,11 +171,11 @@ static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
+ int i;
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
++ evtchn_port_t evtchn;
+
+ /* Timer interrupt has highest priority. */
+- irq = irq_from_virq(cpu, VIRQ_TIMER);
++ irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
+ if (irq != -1) {
+- evtchn_port_t evtchn = evtchn_from_irq(irq);
+ word_idx = evtchn / BITS_PER_LONG;
+ bit_idx = evtchn % BITS_PER_LONG;
+ if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
+@@ -328,9 +328,9 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+ for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
+ if (sync_test_bit(i, BM(sh->evtchn_pending))) {
+ int word_idx = i / BITS_PER_EVTCHN_WORD;
+- printk(" %d: event %d -> irq %d%s%s%s\n",
++ printk(" %d: event %d -> irq %u%s%s%s\n",
+ cpu_from_evtchn(i), i,
+- get_evtchn_to_irq(i),
++ irq_from_evtchn(i),
+ sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
+ ? "" : " l2-clear",
+ !sync_test_bit(i, BM(sh->evtchn_mask))
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index c50419638ac0a..0c5259c68adeb 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -248,15 +248,6 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
+ return 0;
+ }
+
+-int get_evtchn_to_irq(evtchn_port_t evtchn)
+-{
+- if (evtchn >= xen_evtchn_max_channels())
+- return -1;
+- if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+- return -1;
+- return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
+-}
+-
+ /* Get info for IRQ */
+ static struct irq_info *info_for_irq(unsigned irq)
+ {
+@@ -274,6 +265,19 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
+ irq_set_chip_data(irq, info);
+ }
+
++static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
++{
++ int irq;
++
++ if (evtchn >= xen_evtchn_max_channels())
++ return NULL;
++ if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
++ return NULL;
++ irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
++
++ return (irq < 0) ? NULL : info_for_irq(irq);
++}
++
+ /* Per CPU channel accounting */
+ static void channels_on_cpu_dec(struct irq_info *info)
+ {
+@@ -300,6 +304,13 @@ static void channels_on_cpu_inc(struct irq_info *info)
+ info->is_accounted = 1;
+ }
+
++static void xen_irq_free_desc(unsigned int irq)
++{
++ /* Legacy IRQ descriptors are managed by the arch. */
++ if (irq >= nr_legacy_irqs())
++ irq_free_desc(irq);
++}
++
+ static void delayed_free_irq(struct work_struct *work)
+ {
+ struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+@@ -311,14 +322,11 @@ static void delayed_free_irq(struct work_struct *work)
+
+ kfree(info);
+
+- /* Legacy IRQ descriptors are managed by the arch. */
+- if (irq >= nr_legacy_irqs())
+- irq_free_desc(irq);
++ xen_irq_free_desc(irq);
+ }
+
+ /* Constructors for packed IRQ information. */
+ static int xen_irq_info_common_setup(struct irq_info *info,
+- unsigned irq,
+ enum xen_irq_type type,
+ evtchn_port_t evtchn,
+ unsigned short cpu)
+@@ -328,29 +336,27 @@ static int xen_irq_info_common_setup(struct irq_info *info,
+ BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+ info->type = type;
+- info->irq = irq;
+ info->evtchn = evtchn;
+ info->cpu = cpu;
+ info->mask_reason = EVT_MASK_REASON_EXPLICIT;
+ raw_spin_lock_init(&info->lock);
+
+- ret = set_evtchn_to_irq(evtchn, irq);
++ ret = set_evtchn_to_irq(evtchn, info->irq);
+ if (ret < 0)
+ return ret;
+
+- irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
++ irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
+
+ return xen_evtchn_port_setup(evtchn);
+ }
+
+-static int xen_irq_info_evtchn_setup(unsigned irq,
++static int xen_irq_info_evtchn_setup(struct irq_info *info,
+ evtchn_port_t evtchn,
+ struct xenbus_device *dev)
+ {
+- struct irq_info *info = info_for_irq(irq);
+ int ret;
+
+- ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
++ ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
+ info->u.interdomain = dev;
+ if (dev)
+ atomic_inc(&dev->event_channels);
+@@ -358,50 +364,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
+ return ret;
+ }
+
+-static int xen_irq_info_ipi_setup(unsigned cpu,
+- unsigned irq,
+- evtchn_port_t evtchn,
+- enum ipi_vector ipi)
++static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
++ evtchn_port_t evtchn, enum ipi_vector ipi)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ info->u.ipi = ipi;
+
+- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++ per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
+ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+- return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
++ return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
+ }
+
+-static int xen_irq_info_virq_setup(unsigned cpu,
+- unsigned irq,
+- evtchn_port_t evtchn,
+- unsigned virq)
++static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
++ evtchn_port_t evtchn, unsigned int virq)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ info->u.virq = virq;
+
+- per_cpu(virq_to_irq, cpu)[virq] = irq;
++ per_cpu(virq_to_irq, cpu)[virq] = info->irq;
+
+- return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
++ return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
+ }
+
+-static int xen_irq_info_pirq_setup(unsigned irq,
+- evtchn_port_t evtchn,
+- unsigned pirq,
+- unsigned gsi,
+- uint16_t domid,
+- unsigned char flags)
++static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
++ unsigned int pirq, unsigned int gsi,
++ uint16_t domid, unsigned char flags)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ info->u.pirq.pirq = pirq;
+ info->u.pirq.gsi = gsi;
+ info->u.pirq.domid = domid;
+ info->u.pirq.flags = flags;
+
+- return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
++ return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
+ }
+
+ static void xen_irq_info_cleanup(struct irq_info *info)
+@@ -415,7 +408,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
+ /*
+ * Accessors for packed IRQ information.
+ */
+-evtchn_port_t evtchn_from_irq(unsigned irq)
++static evtchn_port_t evtchn_from_irq(unsigned int irq)
+ {
+ const struct irq_info *info = NULL;
+
+@@ -429,29 +422,32 @@ evtchn_port_t evtchn_from_irq(unsigned irq)
+
+ unsigned int irq_from_evtchn(evtchn_port_t evtchn)
+ {
+- return get_evtchn_to_irq(evtchn);
++ struct irq_info *info = evtchn_to_info(evtchn);
++
++ return info ? info->irq : -1;
+ }
+ EXPORT_SYMBOL_GPL(irq_from_evtchn);
+
+-int irq_from_virq(unsigned int cpu, unsigned int virq)
++int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
++ evtchn_port_t *evtchn)
+ {
+- return per_cpu(virq_to_irq, cpu)[virq];
++ int irq = per_cpu(virq_to_irq, cpu)[virq];
++
++ *evtchn = evtchn_from_irq(irq);
++
++ return irq;
+ }
+
+-static enum ipi_vector ipi_from_irq(unsigned irq)
++static enum ipi_vector ipi_from_irq(struct irq_info *info)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_IPI);
+
+ return info->u.ipi;
+ }
+
+-static unsigned virq_from_irq(unsigned irq)
++static unsigned int virq_from_irq(struct irq_info *info)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ BUG_ON(info == NULL);
+ BUG_ON(info->type != IRQT_VIRQ);
+
+@@ -468,25 +464,11 @@ static unsigned pirq_from_irq(unsigned irq)
+ return info->u.pirq.pirq;
+ }
+
+-static enum xen_irq_type type_from_irq(unsigned irq)
+-{
+- return info_for_irq(irq)->type;
+-}
+-
+-static unsigned cpu_from_irq(unsigned irq)
+-{
+- return info_for_irq(irq)->cpu;
+-}
+-
+ unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- unsigned ret = 0;
++ struct irq_info *info = evtchn_to_info(evtchn);
+
+- if (irq != -1)
+- ret = cpu_from_irq(irq);
+-
+- return ret;
++ return info ? info->cpu : 0;
+ }
+
+ static void do_mask(struct irq_info *info, u8 reason)
+@@ -532,22 +514,17 @@ static bool pirq_needs_eoi_flag(unsigned irq)
+ return info->u.pirq.flags & PIRQ_NEEDS_EOI;
+ }
+
+-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
++static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
+ bool force_affinity)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- struct irq_info *info = info_for_irq(irq);
+-
+- BUG_ON(irq == -1);
+-
+ if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
+- struct irq_data *data = irq_get_irq_data(irq);
++ struct irq_data *data = irq_get_irq_data(info->irq);
+
+ irq_data_update_affinity(data, cpumask_of(cpu));
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+ }
+
+- xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
++ xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
+
+ channels_on_cpu_dec(info);
+ info->cpu = cpu;
+@@ -737,50 +714,49 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
+ }
+ EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
+
+-static void xen_irq_init(unsigned irq)
++static struct irq_info *xen_irq_init(unsigned int irq)
+ {
+ struct irq_info *info;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+- if (info == NULL)
+- panic("Unable to allocate metadata for IRQ%d\n", irq);
++ if (info) {
++ info->irq = irq;
++ info->type = IRQT_UNBOUND;
++ info->refcnt = -1;
++ INIT_RCU_WORK(&info->rwork, delayed_free_irq);
+
+- info->type = IRQT_UNBOUND;
+- info->refcnt = -1;
+- INIT_RCU_WORK(&info->rwork, delayed_free_irq);
++ set_info_for_irq(irq, info);
++ /*
++ * Interrupt affinity setting can be immediate. No point
++ * in delaying it until an interrupt is handled.
++ */
++ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+
+- set_info_for_irq(irq, info);
+- /*
+- * Interrupt affinity setting can be immediate. No point
+- * in delaying it until an interrupt is handled.
+- */
+- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
++ INIT_LIST_HEAD(&info->eoi_list);
++ list_add_tail(&info->list, &xen_irq_list_head);
++ }
+
+- INIT_LIST_HEAD(&info->eoi_list);
+- list_add_tail(&info->list, &xen_irq_list_head);
++ return info;
+ }
+
+-static int __must_check xen_allocate_irqs_dynamic(int nvec)
++static struct irq_info *xen_allocate_irq_dynamic(void)
+ {
+- int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
++ int irq = irq_alloc_desc_from(0, -1);
++ struct irq_info *info = NULL;
+
+ if (irq >= 0) {
+- for (i = 0; i < nvec; i++)
+- xen_irq_init(irq + i);
++ info = xen_irq_init(irq);
++ if (!info)
++ xen_irq_free_desc(irq);
+ }
+
+- return irq;
+-}
+-
+-static inline int __must_check xen_allocate_irq_dynamic(void)
+-{
+-
+- return xen_allocate_irqs_dynamic(1);
++ return info;
+ }
+
+-static int __must_check xen_allocate_irq_gsi(unsigned gsi)
++static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
+ {
+ int irq;
++ struct irq_info *info;
+
+ /*
+ * A PV guest has no concept of a GSI (since it has no ACPI
+@@ -797,15 +773,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+ else
+ irq = irq_alloc_desc_at(gsi, -1);
+
+- xen_irq_init(irq);
++ info = xen_irq_init(irq);
++ if (!info)
++ xen_irq_free_desc(irq);
+
+- return irq;
++ return info;
+ }
+
+-static void xen_free_irq(unsigned irq)
++static void xen_free_irq(struct irq_info *info)
+ {
+- struct irq_info *info = info_for_irq(irq);
+-
+ if (WARN_ON(!info))
+ return;
+
+@@ -896,7 +872,7 @@ static unsigned int __startup_pirq(unsigned int irq)
+ goto err;
+
+ info->evtchn = evtchn;
+- bind_evtchn_to_cpu(evtchn, 0, false);
++ bind_evtchn_to_cpu(info, 0, false);
+
+ rc = xen_evtchn_port_setup(evtchn);
+ if (rc)
+@@ -932,8 +908,8 @@ static void shutdown_pirq(struct irq_data *data)
+ return;
+
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
+- xen_evtchn_close(evtchn);
+ xen_irq_info_cleanup(info);
++ xen_evtchn_close(evtchn);
+ }
+
+ static void enable_pirq(struct irq_data *data)
+@@ -962,10 +938,15 @@ int xen_irq_from_gsi(unsigned gsi)
+ }
+ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
+
+-static void __unbind_from_irq(unsigned int irq)
++static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
+ {
+- evtchn_port_t evtchn = evtchn_from_irq(irq);
+- struct irq_info *info = info_for_irq(irq);
++ evtchn_port_t evtchn;
++ bool close_evtchn = false;
++
++ if (!info) {
++ xen_irq_free_desc(irq);
++ return;
++ }
+
+ if (info->refcnt > 0) {
+ info->refcnt--;
+@@ -973,20 +954,22 @@ static void __unbind_from_irq(unsigned int irq)
+ return;
+ }
+
++ evtchn = info->evtchn;
++
+ if (VALID_EVTCHN(evtchn)) {
+- unsigned int cpu = cpu_from_irq(irq);
++ unsigned int cpu = info->cpu;
+ struct xenbus_device *dev;
+
+ if (!info->is_static)
+- xen_evtchn_close(evtchn);
++ close_evtchn = true;
+
+- switch (type_from_irq(irq)) {
++ switch (info->type) {
+ case IRQT_VIRQ:
+- per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
++ per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
+ break;
+ case IRQT_IPI:
+- per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
+- per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
++ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
++ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
+ break;
+ case IRQT_EVTCHN:
+ dev = info->u.interdomain;
+@@ -998,9 +981,12 @@ static void __unbind_from_irq(unsigned int irq)
+ }
+
+ xen_irq_info_cleanup(info);
++
++ if (close_evtchn)
++ xen_evtchn_close(evtchn);
+ }
+
+- xen_free_irq(irq);
++ xen_free_irq(info);
+ }
+
+ /*
+@@ -1016,24 +1002,24 @@ static void __unbind_from_irq(unsigned int irq)
+ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ unsigned pirq, int shareable, char *name)
+ {
+- int irq;
++ struct irq_info *info;
+ struct physdev_irq irq_op;
+ int ret;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = xen_irq_from_gsi(gsi);
+- if (irq != -1) {
++ ret = xen_irq_from_gsi(gsi);
++ if (ret != -1) {
+ pr_info("%s: returning irq %d for gsi %u\n",
+- __func__, irq, gsi);
++ __func__, ret, gsi);
+ goto out;
+ }
+
+- irq = xen_allocate_irq_gsi(gsi);
+- if (irq < 0)
++ info = xen_allocate_irq_gsi(gsi);
++ if (!info)
+ goto out;
+
+- irq_op.irq = irq;
++ irq_op.irq = info->irq;
+ irq_op.vector = 0;
+
+ /* Only the privileged domain can do this. For non-priv, the pcifront
+@@ -1041,20 +1027,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ * this in the priv domain. */
+ if (xen_initial_domain() &&
+ HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
+- xen_free_irq(irq);
+- irq = -ENOSPC;
++ xen_free_irq(info);
++ ret = -ENOSPC;
+ goto out;
+ }
+
+- ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
++ ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
+ shareable ? PIRQ_SHAREABLE : 0);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+
+- pirq_query_unmask(irq);
++ pirq_query_unmask(info->irq);
+ /* We try to use the handler with the appropriate semantic for the
+ * type of interrupt: if the interrupt is an edge triggered
+ * interrupt we use handle_edge_irq.
+@@ -1071,16 +1056,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ * is the right choice either way.
+ */
+ if (shareable)
+- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
+ handle_fasteoi_irq, name);
+ else
+- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
+ handle_edge_irq, name);
+
++ ret = info->irq;
++
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+
+- return irq;
++ return ret;
+ }
+
+ #ifdef CONFIG_PCI_MSI
+@@ -1102,17 +1089,24 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ int pirq, int nvec, const char *name, domid_t domid)
+ {
+ int i, irq, ret;
++ struct irq_info *info;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = xen_allocate_irqs_dynamic(nvec);
++ irq = irq_alloc_descs(-1, 0, nvec, -1);
+ if (irq < 0)
+ goto out;
+
+ for (i = 0; i < nvec; i++) {
++ info = xen_irq_init(irq + i);
++ if (!info) {
++ ret = -ENOMEM;
++ goto error_irq;
++ }
++
+ irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
+
+- ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
++ ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
+ i == 0 ? 0 : PIRQ_MSI_GROUP);
+ if (ret < 0)
+ goto error_irq;
+@@ -1124,9 +1118,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+ return irq;
++
+ error_irq:
+- while (nvec--)
+- __unbind_from_irq(irq + nvec);
++ while (nvec--) {
++ info = info_for_irq(irq + nvec);
++ __unbind_from_irq(info, irq + nvec);
++ }
+ mutex_unlock(&irq_mapping_update_lock);
+ return ret;
+ }
+@@ -1162,7 +1159,7 @@ int xen_destroy_irq(int irq)
+ }
+ }
+
+- xen_free_irq(irq);
++ xen_free_irq(info);
+
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+@@ -1201,28 +1198,27 @@ EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
+ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
+ struct xenbus_device *dev)
+ {
+- int irq;
+- int ret;
++ int ret = -ENOMEM;
++ struct irq_info *info;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return -ENOMEM;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = get_evtchn_to_irq(evtchn);
++ info = evtchn_to_info(evtchn);
+
+- if (irq == -1) {
+- irq = xen_allocate_irq_dynamic();
+- if (irq < 0)
++ if (!info) {
++ info = xen_allocate_irq_dynamic();
++ if (!info)
+ goto out;
+
+- irq_set_chip_and_handler_name(irq, chip,
++ irq_set_chip_and_handler_name(info->irq, chip,
+ handle_edge_irq, "event");
+
+- ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
++ ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+ /*
+@@ -1232,16 +1228,17 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
+ * affinity setting is not invoked on them so nothing would
+ * bind the channel.
+ */
+- bind_evtchn_to_cpu(evtchn, 0, false);
+- } else {
+- struct irq_info *info = info_for_irq(irq);
+- WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
++ bind_evtchn_to_cpu(info, 0, false);
++ } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
++ info->refcnt++;
+ }
+
++ ret = info->irq;
++
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+
+- return irq;
++ return ret;
+ }
+
+ int bind_evtchn_to_irq(evtchn_port_t evtchn)
+@@ -1260,18 +1257,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+ {
+ struct evtchn_bind_ipi bind_ipi;
+ evtchn_port_t evtchn;
+- int ret, irq;
++ struct irq_info *info;
++ int ret;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = per_cpu(ipi_to_irq, cpu)[ipi];
++ ret = per_cpu(ipi_to_irq, cpu)[ipi];
+
+- if (irq == -1) {
+- irq = xen_allocate_irq_dynamic();
+- if (irq < 0)
++ if (ret == -1) {
++ info = xen_allocate_irq_dynamic();
++ if (!info)
+ goto out;
+
+- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
+
+ bind_ipi.vcpu = xen_vcpu_nr(cpu);
+@@ -1280,25 +1278,25 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+ BUG();
+ evtchn = bind_ipi.port;
+
+- ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
++ ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+ /*
+ * Force the affinity mask to the target CPU so proc shows
+ * the correct target.
+ */
+- bind_evtchn_to_cpu(evtchn, cpu, true);
++ bind_evtchn_to_cpu(info, cpu, true);
++ ret = info->irq;
+ } else {
+- struct irq_info *info = info_for_irq(irq);
++ info = info_for_irq(ret);
+ WARN_ON(info == NULL || info->type != IRQT_IPI);
+ }
+
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+- return irq;
++ return ret;
+ }
+
+ static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
+@@ -1366,22 +1364,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ {
+ struct evtchn_bind_virq bind_virq;
+ evtchn_port_t evtchn = 0;
+- int irq, ret;
++ struct irq_info *info;
++ int ret;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = per_cpu(virq_to_irq, cpu)[virq];
++ ret = per_cpu(virq_to_irq, cpu)[virq];
+
+- if (irq == -1) {
+- irq = xen_allocate_irq_dynamic();
+- if (irq < 0)
++ if (ret == -1) {
++ info = xen_allocate_irq_dynamic();
++ if (!info)
+ goto out;
+
+ if (percpu)
+- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
+ else
+- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
++ irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
+ handle_edge_irq, "virq");
+
+ bind_virq.virq = virq;
+@@ -1396,10 +1395,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ BUG_ON(ret < 0);
+ }
+
+- ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
++ ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
+ if (ret < 0) {
+- __unbind_from_irq(irq);
+- irq = ret;
++ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
+
+@@ -1407,22 +1405,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
+ * Force the affinity mask for percpu interrupts so proc
+ * shows the correct target.
+ */
+- bind_evtchn_to_cpu(evtchn, cpu, percpu);
++ bind_evtchn_to_cpu(info, cpu, percpu);
++ ret = info->irq;
+ } else {
+- struct irq_info *info = info_for_irq(irq);
++ info = info_for_irq(ret);
+ WARN_ON(info == NULL || info->type != IRQT_VIRQ);
+ }
+
+ out:
+ mutex_unlock(&irq_mapping_update_lock);
+
+- return irq;
++ return ret;
+ }
+
+ static void unbind_from_irq(unsigned int irq)
+ {
++ struct irq_info *info;
++
+ mutex_lock(&irq_mapping_update_lock);
+- __unbind_from_irq(irq);
++ info = info_for_irq(irq);
++ __unbind_from_irq(info, irq);
+ mutex_unlock(&irq_mapping_update_lock);
+ }
+
+@@ -1573,13 +1575,7 @@ EXPORT_SYMBOL_GPL(xen_set_irq_priority);
+
+ int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- struct irq_info *info;
+-
+- if (irq == -1)
+- return -ENOENT;
+-
+- info = info_for_irq(irq);
++ struct irq_info *info = evtchn_to_info(evtchn);
+
+ if (!info)
+ return -ENOENT;
+@@ -1595,7 +1591,6 @@ EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
+
+ int evtchn_get(evtchn_port_t evtchn)
+ {
+- int irq;
+ struct irq_info *info;
+ int err = -ENOENT;
+
+@@ -1604,11 +1599,7 @@ int evtchn_get(evtchn_port_t evtchn)
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = get_evtchn_to_irq(evtchn);
+- if (irq == -1)
+- goto done;
+-
+- info = info_for_irq(irq);
++ info = evtchn_to_info(evtchn);
+
+ if (!info)
+ goto done;
+@@ -1628,10 +1619,11 @@ EXPORT_SYMBOL_GPL(evtchn_get);
+
+ void evtchn_put(evtchn_port_t evtchn)
+ {
+- int irq = get_evtchn_to_irq(evtchn);
+- if (WARN_ON(irq == -1))
++ struct irq_info *info = evtchn_to_info(evtchn);
++
++ if (WARN_ON(!info))
+ return;
+- unbind_from_irq(irq);
++ unbind_from_irq(info->irq);
+ }
+ EXPORT_SYMBOL_GPL(evtchn_put);
+
+@@ -1661,12 +1653,10 @@ struct evtchn_loop_ctrl {
+
+ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ {
+- int irq;
+- struct irq_info *info;
++ struct irq_info *info = evtchn_to_info(port);
+ struct xenbus_device *dev;
+
+- irq = get_evtchn_to_irq(port);
+- if (irq == -1)
++ if (!info)
+ return;
+
+ /*
+@@ -1691,7 +1681,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ }
+ }
+
+- info = info_for_irq(irq);
+ if (xchg_acquire(&info->is_active, 1))
+ return;
+
+@@ -1705,7 +1694,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
+ info->eoi_time = get_jiffies_64() + event_eoi_delay;
+ }
+
+- generic_handle_irq(irq);
++ generic_handle_irq(info->irq);
+ }
+
+ int xen_evtchn_do_upcall(void)
+@@ -1763,16 +1752,17 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
+ mutex_lock(&irq_mapping_update_lock);
+
+ /* After resume the irq<->evtchn mappings are all cleared out */
+- BUG_ON(get_evtchn_to_irq(evtchn) != -1);
++ BUG_ON(evtchn_to_info(evtchn));
+ /* Expect irq to have been bound before,
+ so there should be a proper type */
+ BUG_ON(info->type == IRQT_UNBOUND);
+
+- (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
++ info->irq = irq;
++ (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
+
+ mutex_unlock(&irq_mapping_update_lock);
+
+- bind_evtchn_to_cpu(evtchn, info->cpu, false);
++ bind_evtchn_to_cpu(info, info->cpu, false);
+
+ /* Unmask the event channel. */
+ enable_irq(irq);
+@@ -1806,7 +1796,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
+ * it, but don't do the xenlinux-level rebind in that case.
+ */
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+- bind_evtchn_to_cpu(evtchn, tcpu, false);
++ bind_evtchn_to_cpu(info, tcpu, false);
+
+ do_unmask(info, EVT_MASK_REASON_TEMPORARY);
+
+@@ -1957,7 +1947,7 @@ static void restore_pirqs(void)
+ if (rc) {
+ pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
+ gsi, irq, pirq, rc);
+- xen_free_irq(irq);
++ xen_free_irq(info);
+ continue;
+ }
+
+@@ -1971,13 +1961,15 @@ static void restore_cpu_virqs(unsigned int cpu)
+ {
+ struct evtchn_bind_virq bind_virq;
+ evtchn_port_t evtchn;
++ struct irq_info *info;
+ int virq, irq;
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
++ info = info_for_irq(irq);
+
+- BUG_ON(virq_from_irq(irq) != virq);
++ BUG_ON(virq_from_irq(info) != virq);
+
+ /* Get a new binding from Xen. */
+ bind_virq.virq = virq;
+@@ -1988,9 +1980,9 @@ static void restore_cpu_virqs(unsigned int cpu)
+ evtchn = bind_virq.port;
+
+ /* Record the new mapping. */
+- (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
++ xen_irq_info_virq_setup(info, cpu, evtchn, virq);
+ /* The affinity mask is still valid */
+- bind_evtchn_to_cpu(evtchn, cpu, false);
++ bind_evtchn_to_cpu(info, cpu, false);
+ }
+ }
+
+@@ -1998,13 +1990,15 @@ static void restore_cpu_ipis(unsigned int cpu)
+ {
+ struct evtchn_bind_ipi bind_ipi;
+ evtchn_port_t evtchn;
++ struct irq_info *info;
+ int ipi, irq;
+
+ for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+ continue;
++ info = info_for_irq(irq);
+
+- BUG_ON(ipi_from_irq(irq) != ipi);
++ BUG_ON(ipi_from_irq(info) != ipi);
+
+ /* Get a new binding from Xen. */
+ bind_ipi.vcpu = xen_vcpu_nr(cpu);
+@@ -2014,9 +2008,9 @@ static void restore_cpu_ipis(unsigned int cpu)
+ evtchn = bind_ipi.port;
+
+ /* Record the new mapping. */
+- (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
++ xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
+ /* The affinity mask is still valid */
+- bind_evtchn_to_cpu(evtchn, cpu, false);
++ bind_evtchn_to_cpu(info, cpu, false);
+ }
+ }
+
+diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
+index 4d3398eff9cdf..19ae31695edcf 100644
+--- a/drivers/xen/events/events_internal.h
++++ b/drivers/xen/events/events_internal.h
+@@ -33,7 +33,6 @@ struct evtchn_ops {
+
+ extern const struct evtchn_ops *evtchn_ops;
+
+-int get_evtchn_to_irq(evtchn_port_t evtchn);
+ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
+
+ unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index 9139a7364df53..59717628ca42b 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -397,7 +397,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
+ if (rc < 0)
+ goto err;
+
+- rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
++ rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, IRQF_SHARED,
+ u->name, evtchn);
+ if (rc < 0)
+ goto err;
+diff --git a/fs/afs/volume.c b/fs/afs/volume.c
+index 115c081a8e2ce..c028598a903c9 100644
+--- a/fs/afs/volume.c
++++ b/fs/afs/volume.c
+@@ -337,7 +337,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
+ {
+ struct afs_server_list *new, *old, *discard;
+ struct afs_vldb_entry *vldb;
+- char idbuf[16];
++ char idbuf[24];
+ int ret, idsz;
+
+ _enter("");
+@@ -345,7 +345,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
+ /* We look up an ID by passing it as a decimal string in the
+ * operation's name parameter.
+ */
+- idsz = sprintf(idbuf, "%llu", volume->vid);
++ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
+
+ vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
+ if (IS_ERR(vldb)) {
+diff --git a/fs/aio.c b/fs/aio.c
+index f8589caef9c10..3235d4e6cc623 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -594,6 +594,13 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+ struct kioctx *ctx = req->ki_ctx;
+ unsigned long flags;
+
++ /*
++	 * The kiocb didn't come from aio, or it is neither a read nor a
++	 * write; ignore it.
++ */
++ if (!(iocb->ki_flags & IOCB_AIO_RW))
++ return;
++
+ if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
+ return;
+
+@@ -1463,7 +1470,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
+ req->ki_complete = aio_complete_rw;
+ req->private = NULL;
+ req->ki_pos = iocb->aio_offset;
+- req->ki_flags = req->ki_filp->f_iocb_flags;
++ req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW;
+ if (iocb->aio_flags & IOCB_FLAG_RESFD)
+ req->ki_flags |= IOCB_EVENTFD;
+ if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
+diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
+index f2ff4cbe8656b..a2e614cf3c19c 100644
+--- a/fs/btrfs/defrag.c
++++ b/fs/btrfs/defrag.c
+@@ -903,7 +903,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
+ goto add;
+
+ /* Skip too large extent */
+- if (range_len >= extent_thresh)
++ if (em->len >= extent_thresh)
+ goto next;
+
+ /*
+diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
+index 7077f72e6f474..f449f7340aad0 100644
+--- a/fs/cachefiles/cache.c
++++ b/fs/cachefiles/cache.c
+@@ -168,6 +168,8 @@ int cachefiles_add_cache(struct cachefiles_cache *cache)
+ dput(root);
+ error_open_root:
+ cachefiles_end_secure(cache, saved_cred);
++ put_cred(cache->cache_cred);
++ cache->cache_cred = NULL;
+ error_getsec:
+ fscache_relinquish_cache(cache_cookie);
+ cache->cache = NULL;
+diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
+index aa4efcabb5e37..5f4df9588620f 100644
+--- a/fs/cachefiles/daemon.c
++++ b/fs/cachefiles/daemon.c
+@@ -805,6 +805,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+ cachefiles_put_directory(cache->graveyard);
+ cachefiles_put_directory(cache->store);
+ mntput(cache->mnt);
++ put_cred(cache->cache_cred);
+
+ kfree(cache->rootdirname);
+ kfree(cache->secctx);
+diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
+index d4f631d39f0fa..f0110a78acb20 100644
+--- a/fs/erofs/namei.c
++++ b/fs/erofs/namei.c
+@@ -130,24 +130,24 @@ static void *erofs_find_target_block(struct erofs_buf *target,
+ /* string comparison without already matched prefix */
+ diff = erofs_dirnamecmp(name, &dname, &matched);
+
+- if (!diff) {
+- *_ndirents = 0;
+- goto out;
+- } else if (diff > 0) {
+- head = mid + 1;
+- startprfx = matched;
+-
+- if (!IS_ERR(candidate))
+- erofs_put_metabuf(target);
+- *target = buf;
+- candidate = de;
+- *_ndirents = ndirents;
+- } else {
++ if (diff < 0) {
+ erofs_put_metabuf(&buf);
+-
+ back = mid - 1;
+ endprfx = matched;
++ continue;
++ }
++
++ if (!IS_ERR(candidate))
++ erofs_put_metabuf(target);
++ *target = buf;
++ if (!diff) {
++ *_ndirents = 0;
++ return de;
+ }
++ head = mid + 1;
++ startprfx = matched;
++ candidate = de;
++ *_ndirents = ndirents;
+ continue;
+ }
+ out: /* free if the candidate is valid */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 4c3e2f38349de..d393df22431a0 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2229,7 +2229,7 @@ static int ext4_fill_es_cache_info(struct inode *inode,
+
+
+ /*
+- * ext4_ext_determine_hole - determine hole around given block
++ * ext4_ext_find_hole - find hole around given block according to the given path
+ * @inode: inode we lookup in
+ * @path: path in extent tree to @lblk
+ * @lblk: pointer to logical block around which we want to determine hole
+@@ -2241,9 +2241,9 @@ static int ext4_fill_es_cache_info(struct inode *inode,
+ * The function returns the length of a hole starting at @lblk. We update @lblk
+ * to the beginning of the hole if we managed to find it.
+ */
+-static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
+- struct ext4_ext_path *path,
+- ext4_lblk_t *lblk)
++static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t *lblk)
+ {
+ int depth = ext_depth(inode);
+ struct ext4_extent *ex;
+@@ -2270,30 +2270,6 @@ static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode,
+ return len;
+ }
+
+-/*
+- * ext4_ext_put_gap_in_cache:
+- * calculate boundaries of the gap that the requested block fits into
+- * and cache this gap
+- */
+-static void
+-ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
+- ext4_lblk_t hole_len)
+-{
+- struct extent_status es;
+-
+- ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
+- hole_start + hole_len - 1, &es);
+- if (es.es_len) {
+- /* There's delayed extent containing lblock? */
+- if (es.es_lblk <= hole_start)
+- return;
+- hole_len = min(es.es_lblk - hole_start, hole_len);
+- }
+- ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
+- ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
+- EXTENT_STATUS_HOLE);
+-}
+-
+ /*
+ * ext4_ext_rm_idx:
+ * removes index from the index block.
+@@ -4062,6 +4038,69 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ return 0;
+ }
+
++/*
++ * Determine the hole length around the given logical block: first try to
++ * locate and expand the hole from the given @path, then adjust it if it is
++ * partially or completely converted to delayed extents, insert it into the
++ * extent cache tree if it is indeed a hole, and finally return
++ * the length of the determined extent.
++ */
++static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t lblk)
++{
++ ext4_lblk_t hole_start, len;
++ struct extent_status es;
++
++ hole_start = lblk;
++ len = ext4_ext_find_hole(inode, path, &hole_start);
++again:
++ ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
++ hole_start + len - 1, &es);
++ if (!es.es_len)
++ goto insert_hole;
++
++ /*
++ * There's a delalloc extent in the hole, handle it if the delalloc
++	 * There's a delalloc extent in the hole; handle the cases where the
++	 * delalloc extent is in front of, behind, or straddles the queried range.
++ if (lblk >= es.es_lblk + es.es_len) {
++ /*
++ * The delalloc extent is in front of the queried range,
++ * find again from the queried start block.
++ */
++ len -= lblk - hole_start;
++ hole_start = lblk;
++ goto again;
++ } else if (in_range(lblk, es.es_lblk, es.es_len)) {
++ /*
++		 * The delalloc extent contains lblk; it must have been
++		 * added after ext4_map_blocks() checked the extent status
++		 * tree, so adjust the length to that of the delalloc
++		 * extent after lblk.
++ */
++ len = es.es_lblk + es.es_len - lblk;
++ return len;
++ } else {
++ /*
++ * The delalloc extent is partially or completely behind
++ * the queried range, update hole length until the
++ * beginning of the delalloc extent.
++ */
++ len = min(es.es_lblk - hole_start, len);
++ }
++
++insert_hole:
++ /* Put just found gap into cache to speed up subsequent requests */
++ ext_debug(inode, " -> %u:%u\n", hole_start, len);
++ ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
++
++ /* Update hole_len to reflect hole size after lblk */
++ if (hole_start != lblk)
++ len -= lblk - hole_start;
++
++ return len;
++}
+
+ /*
+ * Block allocation/map/preallocation routine for extents based files
+@@ -4179,22 +4218,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ * we couldn't try to create block if create flag is zero
+ */
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+- ext4_lblk_t hole_start, hole_len;
++ ext4_lblk_t len;
+
+- hole_start = map->m_lblk;
+- hole_len = ext4_ext_determine_hole(inode, path, &hole_start);
+- /*
+- * put just found gap into cache to speed up
+- * subsequent requests
+- */
+- ext4_ext_put_gap_in_cache(inode, hole_start, hole_len);
++ len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
+
+- /* Update hole_len to reflect hole size after map->m_lblk */
+- if (hole_start != map->m_lblk)
+- hole_len -= map->m_lblk - hole_start;
+ map->m_pblk = 0;
+- map->m_len = min_t(unsigned int, map->m_len, hole_len);
+-
++ map->m_len = min_t(unsigned int, map->m_len, len);
+ goto out;
+ }
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7a2d42a84807a..ea5ac2636632b 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -841,7 +841,7 @@ mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int new_order;
+
+- if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
++ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0)
+ return;
+
+ new_order = mb_avg_fragment_size_order(sb,
+@@ -2304,6 +2304,9 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+ return;
+
+ ext4_lock_group(ac->ac_sb, group);
++ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++ goto out;
++
+ max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
+
+ if (max > 0) {
+@@ -2311,6 +2314,7 @@ void ext4_mb_try_best_found(struct ext4_allocation_context *ac,
+ ext4_mb_use_best_found(ac, e4b);
+ }
+
++out:
+ ext4_unlock_group(ac->ac_sb, group);
+ ext4_mb_unload_buddy(e4b);
+ }
+@@ -2337,12 +2341,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ if (err)
+ return err;
+
+- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
+- ext4_mb_unload_buddy(e4b);
+- return 0;
+- }
+-
+ ext4_lock_group(ac->ac_sb, group);
++ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++ goto out;
++
+ max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
+ ac->ac_g_ex.fe_len, &ex);
+ ex.fe_logical = 0xDEADFA11; /* debug value */
+@@ -2375,6 +2377,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
+ ac->ac_b_ex = ex;
+ ext4_mb_use_best_found(ac, e4b);
+ }
++out:
+ ext4_unlock_group(ac->ac_sb, group);
+ ext4_mb_unload_buddy(e4b);
+
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index 63f70259edc0d..7aadf50109994 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -886,7 +886,7 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ struct runs_tree *run = &ni->file.run;
+ struct ntfs_sb_info *sbi;
+ u8 cluster_bits;
+- struct ATTRIB *attr = NULL, *attr_b;
++ struct ATTRIB *attr, *attr_b;
+ struct ATTR_LIST_ENTRY *le, *le_b;
+ struct mft_inode *mi, *mi_b;
+ CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
+@@ -904,12 +904,8 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ *len = 0;
+ up_read(&ni->file.run_lock);
+
+- if (*len) {
+- if (*lcn != SPARSE_LCN || !new)
+- return 0; /* Fast normal way without allocation. */
+- else if (clen > *len)
+- clen = *len;
+- }
++ if (*len && (*lcn != SPARSE_LCN || !new))
++ return 0; /* Fast normal way without allocation. */
+
+ /* No cluster in cache or we need to allocate cluster in hole. */
+ sbi = ni->mi.sbi;
+@@ -918,6 +914,17 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ ni_lock(ni);
+ down_write(&ni->file.run_lock);
+
++ /* Repeat the code above (under write lock). */
++ if (!run_lookup_entry(run, vcn, lcn, len, NULL))
++ *len = 0;
++
++ if (*len) {
++ if (*lcn != SPARSE_LCN || !new)
++ goto out; /* normal way without allocation. */
++ if (clen > *len)
++ clen = *len;
++ }
++
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
+ if (!attr_b) {
+@@ -1736,8 +1743,10 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ le_b = NULL;
+ attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
+ 0, NULL, &mi_b);
+- if (!attr_b)
+- return -ENOENT;
++ if (!attr_b) {
++ err = -ENOENT;
++ goto out;
++ }
+
+ attr = attr_b;
+ le = le_b;
+@@ -1818,13 +1827,15 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ ok:
+ run_truncate_around(run, vcn);
+ out:
+- if (new_valid > data_size)
+- new_valid = data_size;
++ if (attr_b) {
++ if (new_valid > data_size)
++ new_valid = data_size;
+
+- valid_size = le64_to_cpu(attr_b->nres.valid_size);
+- if (new_valid != valid_size) {
+- attr_b->nres.valid_size = cpu_to_le64(valid_size);
+- mi_b->dirty = true;
++ valid_size = le64_to_cpu(attr_b->nres.valid_size);
++ if (new_valid != valid_size) {
++ attr_b->nres.valid_size = cpu_to_le64(valid_size);
++ mi_b->dirty = true;
++ }
+ }
+
+ return err;
+@@ -2073,7 +2084,7 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+
+ /* Update inode size. */
+ ni->i_valid = valid_size;
+- ni->vfs_inode.i_size = data_size;
++ i_size_write(&ni->vfs_inode, data_size);
+ inode_set_bytes(&ni->vfs_inode, total_size);
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ mark_inode_dirty(&ni->vfs_inode);
+@@ -2488,7 +2499,7 @@ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
+ mi_b->dirty = true;
+
+ done:
+- ni->vfs_inode.i_size += bytes;
++ i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+ mark_inode_dirty(&ni->vfs_inode);
+
+diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
+index 7c01735d1219d..9f4bd8d260901 100644
+--- a/fs/ntfs3/attrlist.c
++++ b/fs/ntfs3/attrlist.c
+@@ -29,7 +29,7 @@ static inline bool al_is_valid_le(const struct ntfs_inode *ni,
+ void al_destroy(struct ntfs_inode *ni)
+ {
+ run_close(&ni->attr_list.run);
+- kfree(ni->attr_list.le);
++ kvfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.size = 0;
+ ni->attr_list.dirty = false;
+@@ -127,12 +127,13 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ {
+ size_t off;
+ u16 sz;
++ const unsigned le_min_size = le_size(0);
+
+ if (!le) {
+ le = ni->attr_list.le;
+ } else {
+ sz = le16_to_cpu(le->size);
+- if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
++ if (sz < le_min_size) {
+ /* Impossible 'cause we should not return such le. */
+ return NULL;
+ }
+@@ -141,7 +142,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+
+ /* Check boundary. */
+ off = PtrOffset(ni->attr_list.le, le);
+- if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
++ if (off + le_min_size > ni->attr_list.size) {
+ /* The regular end of list. */
+ return NULL;
+ }
+@@ -149,8 +150,7 @@ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
+ sz = le16_to_cpu(le->size);
+
+ /* Check le for errors. */
+- if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
+- off + sz > ni->attr_list.size ||
++ if (sz < le_min_size || off + sz > ni->attr_list.size ||
+ sz < le->name_off + le->name_len * sizeof(short)) {
+ return NULL;
+ }
+@@ -318,7 +318,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
+ memcpy(ptr, al->le, off);
+ memcpy(Add2Ptr(ptr, off + sz), le, old_size - off);
+ le = Add2Ptr(ptr, off);
+- kfree(al->le);
++ kvfree(al->le);
+ al->le = ptr;
+ } else {
+ memmove(Add2Ptr(le, sz), le, old_size - off);
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 63f14a0232f6a..845f9b22deef0 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -124,7 +124,7 @@ void wnd_close(struct wnd_bitmap *wnd)
+ {
+ struct rb_node *node, *next;
+
+- kfree(wnd->free_bits);
++ kvfree(wnd->free_bits);
+ wnd->free_bits = NULL;
+ run_close(&wnd->run);
+
+@@ -1360,7 +1360,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
+ memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
+ memset(new_free + wnd->nwnd, 0,
+ (new_wnd - wnd->nwnd) * sizeof(short));
+- kfree(wnd->free_bits);
++ kvfree(wnd->free_bits);
+ wnd->free_bits = new_free;
+ }
+
+diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
+index ec0566b322d5d..2c73ca469d514 100644
+--- a/fs/ntfs3/dir.c
++++ b/fs/ntfs3/dir.c
+@@ -309,11 +309,31 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
+ return 0;
+ }
+
+- /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
+- if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
+- dt_type = DT_LNK;
+- else
+- dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++ /*
++	 * NTFS: symlinks are "dir + reparse" or "file + reparse".
++	 * Unfortunately the reparse attribute is used for many purposes (several dozens),
++	 * so it is not possible here to know whether this name is a symlink or not.
++	 * To get the exact type of the name we would have to open the inode (read the MFT).
++	 * getattr for an opened file (fstat) correctly reports a symlink.
++ */
++ dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
++
++ /*
++	 * It is not reliable to detect the type of a name using the duplicated
++	 * information stored in the parent directory.
++	 * The only correct way to get the type of a name is to read the MFT record and find ATTR_STD.
++	 * The code below is not a good idea:
++	 * it takes additional locks/reads just to get the type of the name.
++	 * Should we add a mount option to enable the branch below?
++ */
++ if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) &&
++ ino != ni->mi.rno) {
++ struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
++ if (!IS_ERR_OR_NULL(inode)) {
++ dt_type = fs_umode_to_dtype(inode->i_mode);
++ iput(inode);
++ }
++ }
+
+ return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
+ }
+@@ -495,11 +515,9 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ struct INDEX_HDR *hdr;
+ const struct ATTR_FILE_NAME *fname;
+ u32 e_size, off, end;
+- u64 vbo = 0;
+ size_t drs = 0, fles = 0, bit = 0;
+- loff_t i_size = ni->vfs_inode.i_size;
+ struct indx_node *node = NULL;
+- u8 index_bits = ni->dir.index_bits;
++ size_t max_indx = i_size_read(&ni->vfs_inode) >> ni->dir.index_bits;
+
+ if (is_empty)
+ *is_empty = true;
+@@ -543,7 +561,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ fles += 1;
+ }
+
+- if (vbo >= i_size)
++ if (bit >= max_indx)
+ goto out;
+
+ err = indx_used_bit(&ni->dir, ni, &bit);
+@@ -553,8 +571,7 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+ if (bit == MINUS_ONE_T)
+ goto out;
+
+- vbo = (u64)bit << index_bits;
+- if (vbo >= i_size)
++ if (bit >= max_indx)
+ goto out;
+
+ err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
+@@ -564,7 +581,6 @@ static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
+
+ hdr = &node->index->ihdr;
+ bit += 1;
+- vbo = (u64)bit << ni->dir.idx2vbn_bits;
+ }
+
+ out:
+diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
+index 1f7a194983c5d..dfd5402a42e44 100644
+--- a/fs/ntfs3/file.c
++++ b/fs/ntfs3/file.c
+@@ -260,6 +260,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ bool rw = vma->vm_flags & VM_WRITE;
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "mmap encrypted not supported");
+ return -EOPNOTSUPP;
+@@ -498,10 +501,14 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_punch_hole(ni, vbo, len, &frame_size);
+ ni_unlock(ni);
++ if (!err)
++ goto ok;
++
+ if (err != E_NTFS_NOTALIGNED)
+ goto out;
+
+ /* Process not aligned punch. */
++ err = 0;
+ mask = frame_size - 1;
+ vbo_a = (vbo + mask) & ~mask;
+ end_a = end & ~mask;
+@@ -524,6 +531,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ }
+ } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+ /*
+@@ -563,6 +572,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ ni_lock(ni);
+ err = attr_insert_range(ni, vbo, len);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ } else {
+ /* Check new size. */
+ u8 cluster_bits = sbi->cluster_bits;
+@@ -632,11 +643,18 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
+ &ni->file.run, i_size, &ni->i_valid,
+ true, NULL);
+ ni_unlock(ni);
++ if (err)
++ goto out;
+ } else if (new_size > i_size) {
+- inode->i_size = new_size;
++ i_size_write(inode, new_size);
+ }
+ }
+
++ok:
++ err = file_modified(file);
++ if (err)
++ goto out;
++
+ out:
+ if (map_locked)
+ filemap_invalidate_unlock(mapping);
+@@ -662,6 +680,9 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ umode_t mode = inode->i_mode;
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ err = setattr_prepare(idmap, dentry, attr);
+ if (err)
+ goto out;
+@@ -675,7 +696,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ goto out;
+ }
+ inode_dio_wait(inode);
+- oldsize = inode->i_size;
++ oldsize = i_size_read(inode);
+ newsize = attr->ia_size;
+
+ if (newsize <= oldsize)
+@@ -687,7 +708,7 @@ int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ goto out;
+
+ ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
+- inode->i_size = newsize;
++ i_size_write(inode, newsize);
+ }
+
+ setattr_copy(idmap, inode, attr);
+@@ -717,6 +738,9 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ struct inode *inode = file->f_mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "encrypted i/o not supported");
+ return -EOPNOTSUPP;
+@@ -751,6 +775,9 @@ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
+ struct inode *inode = in->f_mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "encrypted i/o not supported");
+ return -EOPNOTSUPP;
+@@ -820,7 +847,7 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
+ size_t count = iov_iter_count(from);
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(file);
+- loff_t i_size = inode->i_size;
++ loff_t i_size = i_size_read(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct ntfs_inode *ni = ntfs_i(inode);
+ u64 valid = ni->i_valid;
+@@ -1027,6 +1054,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
+ iocb->ki_pos += written;
+ if (iocb->ki_pos > ni->i_valid)
+ ni->i_valid = iocb->ki_pos;
++ if (iocb->ki_pos > i_size)
++ i_size_write(inode, iocb->ki_pos);
+
+ return written;
+ }
+@@ -1040,8 +1069,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
+ ssize_t ret;
++ int err;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (is_encrypted(ni)) {
+ ntfs_inode_warn(inode, "encrypted i/o not supported");
+ return -EOPNOTSUPP;
+@@ -1067,6 +1100,12 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ if (ret <= 0)
+ goto out;
+
++ err = file_modified(iocb->ki_filp);
++ if (err) {
++ ret = err;
++ goto out;
++ }
++
+ if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
+ /* Should never be here, see ntfs_file_open(). */
+ ret = -EOPNOTSUPP;
+@@ -1096,6 +1135,9 @@ int ntfs_file_open(struct inode *inode, struct file *file)
+ {
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
+ (file->f_flags & O_DIRECT))) {
+ return -EOPNOTSUPP;
+@@ -1137,7 +1179,8 @@ static int ntfs_file_release(struct inode *inode, struct file *file)
+ down_write(&ni->file.run_lock);
+
+ err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
+- inode->i_size, &ni->i_valid, false, NULL);
++ i_size_read(inode), &ni->i_valid, false,
++ NULL);
+
+ up_write(&ni->file.run_lock);
+ ni_unlock(ni);
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index dad976a689859..61c51650266ef 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -778,7 +778,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni)
+ run_deallocate(sbi, &ni->attr_list.run, true);
+ run_close(&ni->attr_list.run);
+ ni->attr_list.size = 0;
+- kfree(ni->attr_list.le);
++ kvfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.dirty = false;
+
+@@ -927,7 +927,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ return 0;
+
+ out:
+- kfree(ni->attr_list.le);
++ kvfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.size = 0;
+ return err;
+@@ -2099,7 +2099,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
+ gfp_t gfp_mask;
+ struct page *pg;
+
+- if (vbo >= ni->vfs_inode.i_size) {
++ if (vbo >= i_size_read(&ni->vfs_inode)) {
+ SetPageUptodate(page);
+ err = 0;
+ goto out;
+@@ -2173,7 +2173,7 @@ int ni_decompress_file(struct ntfs_inode *ni)
+ {
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct inode *inode = &ni->vfs_inode;
+- loff_t i_size = inode->i_size;
++ loff_t i_size = i_size_read(inode);
+ struct address_space *mapping = inode->i_mapping;
+ gfp_t gfp_mask = mapping_gfp_mask(mapping);
+ struct page **pages = NULL;
+@@ -2457,6 +2457,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ struct ATTR_LIST_ENTRY *le = NULL;
+ struct runs_tree *run = &ni->file.run;
+ u64 valid_size = ni->i_valid;
++ loff_t i_size = i_size_read(&ni->vfs_inode);
+ u64 vbo_disk;
+ size_t unc_size;
+ u32 frame_size, i, npages_disk, ondisk_size;
+@@ -2548,7 +2549,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ }
+ }
+
+- frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
++ frames = (i_size - 1) >> frame_bits;
+
+ err = attr_wof_frame_info(ni, attr, run, frame64, frames,
+ frame_bits, &ondisk_size, &vbo_data);
+@@ -2556,8 +2557,7 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ goto out2;
+
+ if (frame64 == frames) {
+- unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
+- (frame_size - 1));
++ unc_size = 1 + ((i_size - 1) & (frame_size - 1));
+ ondisk_size = attr_size(attr) - vbo_data;
+ } else {
+ unc_size = frame_size;
+@@ -3259,6 +3259,9 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
+ if (is_bad_inode(inode) || sb_rdonly(sb))
+ return 0;
+
++ if (unlikely(ntfs3_forced_shutdown(sb)))
++ return -EIO;
++
+ if (!ni_trylock(ni)) {
+ /* 'ni' is under modification, skip for now. */
+ mark_inode_dirty_sync(inode);
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index 98ccb66508583..855519713bf79 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -465,7 +465,7 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
+ {
+ const struct RESTART_AREA *ra;
+ u16 cl, fl, ul;
+- u32 off, l_size, file_dat_bits, file_size_round;
++ u32 off, l_size, seq_bits;
+ u16 ro = le16_to_cpu(rhdr->ra_off);
+ u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
+
+@@ -511,13 +511,15 @@ static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
+ /* Make sure the sequence number bits match the log file size. */
+ l_size = le64_to_cpu(ra->l_size);
+
+- file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
+- file_size_round = 1u << (file_dat_bits + 3);
+- if (file_size_round != l_size &&
+- (file_size_round < l_size || (file_size_round / 2) > l_size)) {
+- return false;
++ seq_bits = sizeof(u64) * 8 + 3;
++ while (l_size) {
++ l_size >>= 1;
++ seq_bits -= 1;
+ }
+
++ if (seq_bits != ra->seq_num_bits)
++ return false;
++
+ /* The log page data offset and record header length must be quad-aligned. */
+ if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
+ !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
+@@ -974,6 +976,16 @@ static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
+ return e;
+ }
+
++struct restart_info {
++ u64 last_lsn;
++ struct RESTART_HDR *r_page;
++ u32 vbo;
++ bool chkdsk_was_run;
++ bool valid_page;
++ bool initialized;
++ bool restart;
++};
++
+ #define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
+
+ #define NTFSLOG_WRAPPED 0x00000001
+@@ -987,6 +999,7 @@ struct ntfs_log {
+ struct ntfs_inode *ni;
+
+ u32 l_size;
++ u32 orig_file_size;
+ u32 sys_page_size;
+ u32 sys_page_mask;
+ u32 page_size;
+@@ -1040,6 +1053,8 @@ struct ntfs_log {
+
+ struct CLIENT_ID client_id;
+ u32 client_undo_commit;
++
++ struct restart_info rst_info, rst_info2;
+ };
+
+ static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
+@@ -1105,16 +1120,6 @@ static inline bool verify_client_lsn(struct ntfs_log *log,
+ lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
+ }
+
+-struct restart_info {
+- u64 last_lsn;
+- struct RESTART_HDR *r_page;
+- u32 vbo;
+- bool chkdsk_was_run;
+- bool valid_page;
+- bool initialized;
+- bool restart;
+-};
+-
+ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ struct RECORD_PAGE_HDR **buffer, bool *usa_error)
+ {
+@@ -1176,7 +1181,7 @@ static int read_log_page(struct ntfs_log *log, u32 vbo,
+ * restart page header. It will stop the first time we find a
+ * valid page header.
+ */
+-static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
++static int log_read_rst(struct ntfs_log *log, bool first,
+ struct restart_info *info)
+ {
+ u32 skip, vbo;
+@@ -1192,7 +1197,7 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ }
+
+ /* Loop continuously until we succeed. */
+- for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
++ for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) {
+ bool usa_error;
+ bool brst, bchk;
+ struct RESTART_AREA *ra;
+@@ -1285,22 +1290,17 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
+ /*
+ * Ilog_init_pg_hdr - Init @log from restart page header.
+ */
+-static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
+- u32 page_size, u16 major_ver, u16 minor_ver)
++static void log_init_pg_hdr(struct ntfs_log *log, u16 major_ver, u16 minor_ver)
+ {
+- log->sys_page_size = sys_page_size;
+- log->sys_page_mask = sys_page_size - 1;
+- log->page_size = page_size;
+- log->page_mask = page_size - 1;
+- log->page_bits = blksize_bits(page_size);
++ log->sys_page_size = log->page_size;
++ log->sys_page_mask = log->page_mask;
+
+ log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
+ if (!log->clst_per_page)
+ log->clst_per_page = 1;
+
+- log->first_page = major_ver >= 2 ?
+- 0x22 * page_size :
+- ((sys_page_size << 1) + (page_size << 1));
++ log->first_page = major_ver >= 2 ? 0x22 * log->page_size :
++ 4 * log->page_size;
+ log->major_ver = major_ver;
+ log->minor_ver = minor_ver;
+ }
+@@ -1308,12 +1308,11 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
+ /*
+ * log_create - Init @log in cases when we don't have a restart area to use.
+ */
+-static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
++static void log_create(struct ntfs_log *log, const u64 last_lsn,
+ u32 open_log_count, bool wrapped, bool use_multi_page)
+ {
+- log->l_size = l_size;
+ /* All file offsets must be quadword aligned. */
+- log->file_data_bits = blksize_bits(l_size) - 3;
++ log->file_data_bits = blksize_bits(log->l_size) - 3;
+ log->seq_num_mask = (8 << log->file_data_bits) - 1;
+ log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
+ log->seq_num = (last_lsn >> log->file_data_bits) + 2;
+@@ -3720,10 +3719,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ struct ntfs_log *log;
+
+- struct restart_info rst_info, rst_info2;
+- u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
++ u64 rec_lsn, checkpt_lsn = 0, rlsn = 0;
+ struct ATTR_NAME_ENTRY *attr_names = NULL;
+- struct ATTR_NAME_ENTRY *ane;
+ struct RESTART_TABLE *dptbl = NULL;
+ struct RESTART_TABLE *trtbl = NULL;
+ const struct RESTART_TABLE *rt;
+@@ -3741,9 +3738,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ struct TRANSACTION_ENTRY *tr;
+ struct DIR_PAGE_ENTRY *dp;
+ u32 i, bytes_per_attr_entry;
+- u32 l_size = ni->vfs_inode.i_size;
+- u32 orig_file_size = l_size;
+- u32 page_size, vbo, tail, off, dlen;
++ u32 vbo, tail, off, dlen;
+ u32 saved_len, rec_len, transact_id;
+ bool use_second_page;
+ struct RESTART_AREA *ra2, *ra = NULL;
+@@ -3758,52 +3753,50 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ u16 t16;
+ u32 t32;
+
+- /* Get the size of page. NOTE: To replay we can use default page. */
+-#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
+- page_size = norm_file_page(PAGE_SIZE, &l_size, true);
+-#else
+- page_size = norm_file_page(PAGE_SIZE, &l_size, false);
+-#endif
+- if (!page_size)
+- return -EINVAL;
+-
+ log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
+ if (!log)
+ return -ENOMEM;
+
+ log->ni = ni;
+- log->l_size = l_size;
+- log->one_page_buf = kmalloc(page_size, GFP_NOFS);
++ log->l_size = log->orig_file_size = ni->vfs_inode.i_size;
+
++ /* Get the size of page. NOTE: To replay we can use default page. */
++#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
++ log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true);
++#else
++ log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false);
++#endif
++ if (!log->page_size) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ log->one_page_buf = kmalloc(log->page_size, GFP_NOFS);
+ if (!log->one_page_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+- log->page_size = page_size;
+- log->page_mask = page_size - 1;
+- log->page_bits = blksize_bits(page_size);
++ log->page_mask = log->page_size - 1;
++ log->page_bits = blksize_bits(log->page_size);
+
+ /* Look for a restart area on the disk. */
+- memset(&rst_info, 0, sizeof(struct restart_info));
+- err = log_read_rst(log, l_size, true, &rst_info);
++ err = log_read_rst(log, true, &log->rst_info);
+ if (err)
+ goto out;
+
+ /* remember 'initialized' */
+- *initialized = rst_info.initialized;
++ *initialized = log->rst_info.initialized;
+
+- if (!rst_info.restart) {
+- if (rst_info.initialized) {
++ if (!log->rst_info.restart) {
++ if (log->rst_info.initialized) {
+ /* No restart area but the file is not initialized. */
+ err = -EINVAL;
+ goto out;
+ }
+
+- log_init_pg_hdr(log, page_size, page_size, 1, 1);
+- log_create(log, l_size, 0, get_random_u32(), false, false);
+-
+- log->ra = ra;
++ log_init_pg_hdr(log, 1, 1);
++ log_create(log, 0, get_random_u32(), false, false);
+
+ ra = log_create_ra(log);
+ if (!ra) {
+@@ -3820,25 +3813,26 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ * If the restart offset above wasn't zero then we won't
+ * look for a second restart.
+ */
+- if (rst_info.vbo)
++ if (log->rst_info.vbo)
+ goto check_restart_area;
+
+- memset(&rst_info2, 0, sizeof(struct restart_info));
+- err = log_read_rst(log, l_size, false, &rst_info2);
++ err = log_read_rst(log, false, &log->rst_info2);
+ if (err)
+ goto out;
+
+ /* Determine which restart area to use. */
+- if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
++ if (!log->rst_info2.restart ||
++ log->rst_info2.last_lsn <= log->rst_info.last_lsn)
+ goto use_first_page;
+
+ use_second_page = true;
+
+- if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
++ if (log->rst_info.chkdsk_was_run &&
++ log->page_size != log->rst_info.vbo) {
+ struct RECORD_PAGE_HDR *sp = NULL;
+ bool usa_error;
+
+- if (!read_log_page(log, page_size, &sp, &usa_error) &&
++ if (!read_log_page(log, log->page_size, &sp, &usa_error) &&
+ sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
+ use_second_page = false;
+ }
+@@ -3846,52 +3840,43 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ }
+
+ if (use_second_page) {
+- kfree(rst_info.r_page);
+- memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
+- rst_info2.r_page = NULL;
++ kfree(log->rst_info.r_page);
++ memcpy(&log->rst_info, &log->rst_info2,
++ sizeof(struct restart_info));
++ log->rst_info2.r_page = NULL;
+ }
+
+ use_first_page:
+- kfree(rst_info2.r_page);
++ kfree(log->rst_info2.r_page);
+
+ check_restart_area:
+ /*
+ * If the restart area is at offset 0, we want
+ * to write the second restart area first.
+ */
+- log->init_ra = !!rst_info.vbo;
++ log->init_ra = !!log->rst_info.vbo;
+
+ /* If we have a valid page then grab a pointer to the restart area. */
+- ra2 = rst_info.valid_page ?
+- Add2Ptr(rst_info.r_page,
+- le16_to_cpu(rst_info.r_page->ra_off)) :
++ ra2 = log->rst_info.valid_page ?
++ Add2Ptr(log->rst_info.r_page,
++ le16_to_cpu(log->rst_info.r_page->ra_off)) :
+ NULL;
+
+- if (rst_info.chkdsk_was_run ||
++ if (log->rst_info.chkdsk_was_run ||
+ (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
+ bool wrapped = false;
+ bool use_multi_page = false;
+ u32 open_log_count;
+
+ /* Do some checks based on whether we have a valid log page. */
+- if (!rst_info.valid_page) {
+- open_log_count = get_random_u32();
+- goto init_log_instance;
+- }
+- open_log_count = le32_to_cpu(ra2->open_log_count);
+-
+- /*
+- * If the restart page size isn't changing then we want to
+- * check how much work we need to do.
+- */
+- if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
+- goto init_log_instance;
++ open_log_count = log->rst_info.valid_page ?
++ le32_to_cpu(ra2->open_log_count) :
++ get_random_u32();
+
+-init_log_instance:
+- log_init_pg_hdr(log, page_size, page_size, 1, 1);
++ log_init_pg_hdr(log, 1, 1);
+
+- log_create(log, l_size, rst_info.last_lsn, open_log_count,
+- wrapped, use_multi_page);
++ log_create(log, log->rst_info.last_lsn, open_log_count, wrapped,
++ use_multi_page);
+
+ ra = log_create_ra(log);
+ if (!ra) {
+@@ -3916,28 +3901,27 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ * use the log file. We must use the system page size instead of the
+ * default size if there is not a clean shutdown.
+ */
+- t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
+- if (page_size != t32) {
+- l_size = orig_file_size;
+- page_size =
+- norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
++ t32 = le32_to_cpu(log->rst_info.r_page->sys_page_size);
++ if (log->page_size != t32) {
++ log->l_size = log->orig_file_size;
++ log->page_size = norm_file_page(t32, &log->l_size,
++ t32 == DefaultLogPageSize);
+ }
+
+- if (page_size != t32 ||
+- page_size != le32_to_cpu(rst_info.r_page->page_size)) {
++ if (log->page_size != t32 ||
++ log->page_size != le32_to_cpu(log->rst_info.r_page->page_size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* If the file size has shrunk then we won't mount it. */
+- if (l_size < le64_to_cpu(ra2->l_size)) {
++ if (log->l_size < le64_to_cpu(ra2->l_size)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+- log_init_pg_hdr(log, page_size, page_size,
+- le16_to_cpu(rst_info.r_page->major_ver),
+- le16_to_cpu(rst_info.r_page->minor_ver));
++ log_init_pg_hdr(log, le16_to_cpu(log->rst_info.r_page->major_ver),
++ le16_to_cpu(log->rst_info.r_page->minor_ver));
+
+ log->l_size = le64_to_cpu(ra2->l_size);
+ log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
+@@ -3945,7 +3929,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ log->seq_num_mask = (8 << log->file_data_bits) - 1;
+ log->last_lsn = le64_to_cpu(ra2->current_lsn);
+ log->seq_num = log->last_lsn >> log->file_data_bits;
+- log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
++ log->ra_off = le16_to_cpu(log->rst_info.r_page->ra_off);
+ log->restart_size = log->sys_page_size - log->ra_off;
+ log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
+ log->ra_size = le16_to_cpu(ra2->ra_len);
+@@ -4045,7 +4029,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ log->current_avail = current_log_avail(log);
+
+ /* Remember which restart area to write first. */
+- log->init_ra = rst_info.vbo;
++ log->init_ra = log->rst_info.vbo;
+
+ process_log:
+ /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values. */
+@@ -4105,7 +4089,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ log->client_id.seq_num = cr->seq_num;
+ log->client_id.client_idx = client;
+
+- err = read_rst_area(log, &rst, &ra_lsn);
++ err = read_rst_area(log, &rst, &checkpt_lsn);
+ if (err)
+ goto out;
+
+@@ -4114,9 +4098,8 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+
+ bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
+
+- checkpt_lsn = le64_to_cpu(rst->check_point_start);
+- if (!checkpt_lsn)
+- checkpt_lsn = ra_lsn;
++ if (rst->check_point_start)
++ checkpt_lsn = le64_to_cpu(rst->check_point_start);
+
+ /* Allocate and Read the Transaction Table. */
+ if (!rst->transact_table_len)
+@@ -4330,23 +4313,20 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ lcb = NULL;
+
+ check_attribute_names2:
+- if (!rst->attr_names_len)
+- goto trace_attribute_table;
+-
+- ane = attr_names;
+- if (!oatbl)
+- goto trace_attribute_table;
+- while (ane->off) {
+- /* TODO: Clear table on exit! */
+- oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
+- t16 = le16_to_cpu(ane->name_bytes);
+- oe->name_len = t16 / sizeof(short);
+- oe->ptr = ane->name;
+- oe->is_attr_name = 2;
+- ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
+- }
+-
+-trace_attribute_table:
++ if (rst->attr_names_len && oatbl) {
++ struct ATTR_NAME_ENTRY *ane = attr_names;
++ while (ane->off) {
++ /* TODO: Clear table on exit! */
++ oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
++ t16 = le16_to_cpu(ane->name_bytes);
++ oe->name_len = t16 / sizeof(short);
++ oe->ptr = ane->name;
++ oe->is_attr_name = 2;
++ ane = Add2Ptr(ane,
++ sizeof(struct ATTR_NAME_ENTRY) + t16);
++ }
++ }
++
+ /*
+ * If the checkpt_lsn is zero, then this is a freshly
+ * formatted disk and we have no work to do.
+@@ -5189,7 +5169,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
+ kfree(oatbl);
+ kfree(dptbl);
+ kfree(attr_names);
+- kfree(rst_info.r_page);
++ kfree(log->rst_info.r_page);
+
+ kfree(ra);
+ kfree(log->one_page_buf);
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index fbfe21dbb4259..c66b0eab6a160 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -853,7 +853,8 @@ void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
+ /*
+ * sb can be NULL here. In this case sbi->flags should be 0 too.
+ */
+- if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR))
++ if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR) ||
++ unlikely(ntfs3_forced_shutdown(sb)))
+ return;
+
+ blocksize = sb->s_blocksize;
+@@ -1006,6 +1007,30 @@ static inline __le32 security_hash(const void *sd, size_t bytes)
+ return cpu_to_le32(hash);
+ }
+
++/*
++ * simple wrapper for sb_bread_unmovable.
++ */
++struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
++{
++ struct ntfs_sb_info *sbi = sb->s_fs_info;
++ struct buffer_head *bh;
++
++ if (unlikely(block >= sbi->volume.blocks)) {
++ /* prevent generic message "attempt to access beyond end of device" */
++ ntfs_err(sb, "try to read out of volume at offset 0x%llx",
++ (u64)block << sb->s_blocksize_bits);
++ return NULL;
++ }
++
++ bh = sb_bread_unmovable(sb, block);
++ if (bh)
++ return bh;
++
++ ntfs_err(sb, "failed to read volume at offset 0x%llx",
++ (u64)block << sb->s_blocksize_bits);
++ return NULL;
++}
++
+ int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
+ {
+ struct block_device *bdev = sb->s_bdev;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index cf92b2433f7a7..daabaad63aaf6 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1462,7 +1462,7 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ goto out2;
+
+ if (in->name == I30_NAME) {
+- ni->vfs_inode.i_size = data_size;
++ i_size_write(&ni->vfs_inode, data_size);
+ inode_set_bytes(&ni->vfs_inode, alloc_size);
+ }
+
+@@ -1544,7 +1544,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
+ }
+
+ if (in->name == I30_NAME)
+- ni->vfs_inode.i_size = data_size;
++ i_size_write(&ni->vfs_inode, data_size);
+
+ *vbn = bit << indx->idx2vbn_bits;
+
+@@ -2090,7 +2090,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
+ return err;
+
+ if (in->name == I30_NAME)
+- ni->vfs_inode.i_size = new_data;
++ i_size_write(&ni->vfs_inode, new_data);
+
+ bpb = bitmap_size(bit);
+ if (bpb * 8 == nbits)
+@@ -2576,7 +2576,7 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
+ err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
+ &indx->alloc_run, 0, NULL, false, NULL);
+ if (in->name == I30_NAME)
+- ni->vfs_inode.i_size = 0;
++ i_size_write(&ni->vfs_inode, 0);
+
+ err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
+ false, NULL);
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index d6d021e19aaa2..34f2e16f3f5b6 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -410,7 +410,6 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+ goto out;
+
+ if (!is_match && name) {
+- /* Reuse rec as buffer for ascii name. */
+ err = -ENOENT;
+ goto out;
+ }
+@@ -425,6 +424,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
+
+ if (names != le16_to_cpu(rec->hard_links)) {
+ /* Correct minor error on the fly. Do not mark inode as dirty. */
++ ntfs_inode_warn(inode, "Correct links count -> %u.", names);
+ rec->hard_links = cpu_to_le16(names);
+ ni->mi.dirty = true;
+ }
+@@ -851,9 +851,13 @@ static int ntfs_resident_writepage(struct folio *folio,
+ struct writeback_control *wbc, void *data)
+ {
+ struct address_space *mapping = data;
+- struct ntfs_inode *ni = ntfs_i(mapping->host);
++ struct inode *inode = mapping->host;
++ struct ntfs_inode *ni = ntfs_i(inode);
+ int ret;
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ ni_lock(ni);
+ ret = attr_data_write_resident(ni, &folio->page);
+ ni_unlock(ni);
+@@ -867,7 +871,12 @@ static int ntfs_resident_writepage(struct folio *folio,
+ static int ntfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+ {
+- if (is_resident(ntfs_i(mapping->host)))
++ struct inode *inode = mapping->host;
++
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
++ if (is_resident(ntfs_i(inode)))
+ return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
+ mapping);
+ return mpage_writepages(mapping, wbc, ntfs_get_block);
+@@ -887,6 +896,9 @@ int ntfs_write_begin(struct file *file, struct address_space *mapping,
+ struct inode *inode = mapping->host;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ *pagep = NULL;
+ if (is_resident(ni)) {
+ struct page *page =
+@@ -971,7 +983,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
+ }
+
+ if (pos + err > inode->i_size) {
+- inode->i_size = pos + err;
++ i_size_write(inode, pos + err);
+ dirty = true;
+ }
+
+@@ -1303,6 +1315,11 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ goto out1;
+ }
+
++ if (unlikely(ntfs3_forced_shutdown(sb))) {
++ err = -EIO;
++ goto out2;
++ }
++
+ /* Mark rw ntfs as dirty. it will be cleared at umount. */
+ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
+
+diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
+index eedacf94edd80..b5687d74b4495 100644
+--- a/fs/ntfs3/namei.c
++++ b/fs/ntfs3/namei.c
+@@ -181,6 +181,9 @@ static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
+ struct ntfs_inode *ni = ntfs_i(dir);
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
++ return -EIO;
++
+ ni_lock_dir(ni);
+
+ err = ntfs_unlink_inode(dir, dentry);
+@@ -199,6 +202,9 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
+ u32 size = strlen(symname);
+ struct inode *inode;
+
++ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
++ return -EIO;
++
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0,
+ symname, size, NULL);
+
+@@ -227,6 +233,9 @@ static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
+ struct ntfs_inode *ni = ntfs_i(dir);
+ int err;
+
++ if (unlikely(ntfs3_forced_shutdown(dir->i_sb)))
++ return -EIO;
++
+ ni_lock_dir(ni);
+
+ err = ntfs_unlink_inode(dir, dentry);
+@@ -264,6 +273,9 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
+ 1024);
+ static_assert(PATH_MAX >= 4 * 1024);
+
++ if (unlikely(ntfs3_forced_shutdown(sb)))
++ return -EIO;
++
+ if (flags & ~RENAME_NOREPLACE)
+ return -EINVAL;
+
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 86aecbb01a92f..f61f5b3adb03a 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -523,12 +523,10 @@ struct ATTR_LIST_ENTRY {
+ __le64 vcn; // 0x08: Starting VCN of this attribute.
+ struct MFT_REF ref; // 0x10: MFT record number with attribute.
+ __le16 id; // 0x18: struct ATTRIB ID.
+- __le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset.
++ __le16 name[]; // 0x1A: Just to align. To get real name can use name_off.
+
+ }; // sizeof(0x20)
+
+-static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
+-
+ static inline u32 le_size(u8 name_len)
+ {
+ return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) +
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 29a9b0b29e4f8..1ca40c1d966b8 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -61,6 +61,8 @@ enum utf16_endian;
+
+ /* sbi->flags */
+ #define NTFS_FLAGS_NODISCARD 0x00000001
++/* ntfs in shutdown state. */
++#define NTFS_FLAGS_SHUTDOWN_BIT 0x00000002 /* bit number; as a mask == 4 */
+ /* Set when LogFile is replaying. */
+ #define NTFS_FLAGS_LOG_REPLAYING 0x00000008
+ /* Set when we changed first MFT's which copy must be updated in $MftMirr. */
+@@ -226,7 +228,7 @@ struct ntfs_sb_info {
+ u64 maxbytes; // Maximum size for normal files.
+ u64 maxbytes_sparse; // Maximum size for sparse file.
+
+- u32 flags; // See NTFS_FLAGS_XXX.
++ unsigned long flags; // See NTFS_FLAGS_
+
+ CLST zone_max; // Maximum MFT zone length in clusters
+ CLST bad_clusters; // The count of marked bad clusters.
+@@ -584,6 +586,7 @@ bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
+ int log_replay(struct ntfs_inode *ni, bool *initialized);
+
+ /* Globals from fsntfs.c */
++struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block);
+ bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
+ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
+ bool simple);
+@@ -999,6 +1002,11 @@ static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
+ return sb->s_fs_info;
+ }
+
++static inline int ntfs3_forced_shutdown(struct super_block *sb)
++{
++ return test_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
++}
++
+ /*
+ * ntfs_up_cluster - Align up on cluster boundary.
+ */
+@@ -1025,19 +1033,6 @@ static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
+ return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+ }
+
+-static inline struct buffer_head *ntfs_bread(struct super_block *sb,
+- sector_t block)
+-{
+- struct buffer_head *bh = sb_bread(sb, block);
+-
+- if (bh)
+- return bh;
+-
+- ntfs_err(sb, "failed to read volume at offset 0x%llx",
+- (u64)block << sb->s_blocksize_bits);
+- return NULL;
+-}
+-
+ static inline struct ntfs_inode *ntfs_i(struct inode *inode)
+ {
+ return container_of(inode, struct ntfs_inode, vfs_inode);
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 53629b1f65e99..6aa3a9d44df1b 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -279,7 +279,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
+ if (t16 > asize)
+ return NULL;
+
+- if (t16 + le32_to_cpu(attr->res.data_size) > asize)
++ if (le32_to_cpu(attr->res.data_size) > asize - t16)
+ return NULL;
+
+ t32 = sizeof(short) * attr->name_len;
+@@ -535,8 +535,20 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
+ return false;
+
+ if (ni && is_attr_indexed(attr)) {
+- le16_add_cpu(&ni->mi.mrec->hard_links, -1);
+- ni->mi.dirty = true;
++ u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
++ struct ATTR_FILE_NAME *fname =
++ attr->type != ATTR_NAME ?
++ NULL :
++ resident_data_ex(attr,
++ SIZEOF_ATTRIBUTE_FILENAME);
++ if (fname && fname->type == FILE_NAME_DOS) {
++ /* Do not decrease links count when deleting DOS name. */
++ } else if (!links) {
++ /* minor error. Not critical. */
++ } else {
++ ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
++ ni->mi.dirty = true;
++ }
+ }
+
+ used -= asize;
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index f763e3256ccc1..eb50602297406 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -625,7 +625,7 @@ static void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
+ {
+ kfree(sbi->new_rec);
+ kvfree(ntfs_put_shared(sbi->upcase));
+- kfree(sbi->def_table);
++ kvfree(sbi->def_table);
+ kfree(sbi->compress.lznt);
+ #ifdef CONFIG_NTFS3_LZX_XPRESS
+ xpress_free_decompressor(sbi->compress.xpress);
+@@ -714,6 +714,14 @@ static int ntfs_show_options(struct seq_file *m, struct dentry *root)
+ return 0;
+ }
+
++/*
++ * ntfs_shutdown - super_operations::shutdown
++ */
++static void ntfs_shutdown(struct super_block *sb)
++{
++ set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &ntfs_sb(sb)->flags);
++}
++
+ /*
+ * ntfs_sync_fs - super_operations::sync_fs
+ */
+@@ -724,6 +732,9 @@ static int ntfs_sync_fs(struct super_block *sb, int wait)
+ struct ntfs_inode *ni;
+ struct inode *inode;
+
++ if (unlikely(ntfs3_forced_shutdown(sb)))
++ return -EIO;
++
+ ni = sbi->security.ni;
+ if (ni) {
+ inode = &ni->vfs_inode;
+@@ -763,6 +774,7 @@ static const struct super_operations ntfs_sops = {
+ .put_super = ntfs_put_super,
+ .statfs = ntfs_statfs,
+ .show_options = ntfs_show_options,
++ .shutdown = ntfs_shutdown,
+ .sync_fs = ntfs_sync_fs,
+ .write_inode = ntfs3_write_inode,
+ };
+@@ -865,6 +877,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ u16 fn, ao;
+ u8 cluster_bits;
+ u32 boot_off = 0;
++ sector_t boot_block = 0;
+ const char *hint = "Primary boot";
+
+ /* Save original dev_size. Used with alternative boot. */
+@@ -872,11 +885,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+
+- bh = ntfs_bread(sb, 0);
++read_boot:
++ bh = ntfs_bread(sb, boot_block);
+ if (!bh)
+- return -EIO;
++ return boot_block ? -EINVAL : -EIO;
+
+-check_boot:
+ err = -EINVAL;
+
+ /* Corrupted image; do not read OOB */
+@@ -1107,26 +1120,24 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ }
+
+ out:
+- if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
++ brelse(bh);
++
++ if (err == -EINVAL && !boot_block && dev_size0 > PAGE_SHIFT) {
+ u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
+ u64 lbo = dev_size0 - sizeof(*boot);
+
+- /*
+- * Try alternative boot (last sector)
+- */
+- brelse(bh);
+-
+- sb_set_blocksize(sb, block_size);
+- bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
+- if (!bh)
+- return -EINVAL;
+-
++ boot_block = lbo >> blksize_bits(block_size);
+ boot_off = lbo & (block_size - 1);
+- hint = "Alternative boot";
+- dev_size = dev_size0; /* restore original size. */
+- goto check_boot;
++ if (boot_block && block_size >= boot_off + sizeof(*boot)) {
++ /*
++ * Try alternative boot (last sector)
++ */
++ sb_set_blocksize(sb, block_size);
++ hint = "Alternative boot";
++ dev_size = dev_size0; /* restore original size. */
++ goto read_boot;
++ }
+ }
+- brelse(bh);
+
+ return err;
+ }
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index 4920548192a0c..b50010494e6d0 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -219,6 +219,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
+ if (!ea->name_len)
+ break;
+
++ if (ea->name_len > ea_size)
++ break;
++
+ if (buffer) {
+ /* Check if we can use field ea->name */
+ if (off + ea_size > size)
+@@ -744,6 +747,9 @@ static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
+ int err;
+ struct ntfs_inode *ni = ntfs_i(inode);
+
++ if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
++ return -EIO;
++
+ /* Dispatch request. */
+ if (!strcmp(name, SYSTEM_DOS_ATTRIB)) {
+ /* system.dos_attrib */
+diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
+index d64a306a414be..5730c65ffb40d 100644
+--- a/fs/smb/client/cached_dir.c
++++ b/fs/smb/client/cached_dir.c
+@@ -151,7 +151,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ return -EOPNOTSUPP;
+
+ ses = tcon->ses;
+- server = ses->server;
++ server = cifs_pick_channel(ses);
+ cfids = tcon->cfids;
+
+ if (!server->ops->new_lease_key)
+@@ -367,6 +367,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ atomic_inc(&tcon->num_remote_opens);
+ }
+ kfree(utf16_path);
++
+ return rc;
+ }
+
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+index ef4c2e3c9fa61..6322f0f68a176 100644
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -572,7 +572,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
+ UniStrupr(user);
+ } else {
+- memset(user, '\0', 2);
++ *(u16 *)user = 0;
+ }
+
+ rc = crypto_shash_update(ses->server->secmech.hmacmd5,
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 942e6ece56b1a..462554917e5a1 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -82,7 +82,7 @@
+ #define SMB_INTERFACE_POLL_INTERVAL 600
+
+ /* maximum number of PDUs in one compound */
+-#define MAX_COMPOUND 5
++#define MAX_COMPOUND 7
+
+ /*
+ * Default number of credits to keep available for SMB3.
+@@ -1018,6 +1018,8 @@ struct cifs_chan {
+ __u8 signkey[SMB3_SIGN_KEY_SIZE];
+ };
+
++#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
++
+ /*
+ * Session structure. One of these for each uid session with a particular host
+ */
+@@ -1050,6 +1052,7 @@ struct cifs_ses {
+ enum securityEnum sectype; /* what security flavor was specified? */
+ bool sign; /* is signing required? */
+ bool domainAuto:1;
++ unsigned int flags;
+ __u16 session_flags;
+ __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+@@ -1820,6 +1823,13 @@ static inline bool is_retryable_error(int error)
+ return false;
+ }
+
++static inline bool is_replayable_error(int error)
++{
++ if (error == -EAGAIN || error == -ECONNABORTED)
++ return true;
++ return false;
++}
++
+
+ /* cifs_get_writable_file() flags */
+ #define FIND_WR_ANY 0
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 19440255944b0..a4147e999736a 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -229,6 +229,12 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
+ /* check if iface is still active */
+ spin_lock(&ses->chan_lock);
++ if (cifs_ses_get_chan_index(ses, server) ==
++ CIFS_INVAL_CHAN_INDEX) {
++ spin_unlock(&ses->chan_lock);
++ continue;
++ }
++
+ if (!cifs_chan_is_iface_active(ses, server)) {
+ spin_unlock(&ses->chan_lock);
+ cifs_chan_update_iface(ses, server);
+@@ -4226,6 +4232,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
++
++ /* if tcon is marked for needing reconnect, update state */
++ if (tcon->need_reconnect)
++ tcon->status = TID_NEED_TCON;
++
+ if (tcon->status == TID_GOOD) {
+ spin_unlock(&tcon->tc_lock);
+ return 0;
+diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
+index a8a1d386da656..449c59830039b 100644
+--- a/fs/smb/client/dfs.c
++++ b/fs/smb/client/dfs.c
+@@ -565,6 +565,11 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
++
++ /* if tcon is marked for needing reconnect, update state */
++ if (tcon->need_reconnect)
++ tcon->status = TID_NEED_TCON;
++
+ if (tcon->status == TID_GOOD) {
+ spin_unlock(&tcon->tc_lock);
+ return 0;
+@@ -625,8 +630,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ spin_lock(&tcon->tc_lock);
+ if (tcon->status == TID_IN_TCON)
+ tcon->status = TID_GOOD;
+- spin_unlock(&tcon->tc_lock);
+ tcon->need_reconnect = false;
++ spin_unlock(&tcon->tc_lock);
+ }
+
+ return rc;
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 32a8525415d96..4cbb5487bd8d0 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -175,6 +175,9 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+
+ /* only send once per connect */
+ spin_lock(&tcon->tc_lock);
++ if (tcon->need_reconnect)
++ tcon->status = TID_NEED_RECON;
++
+ if (tcon->status != TID_NEED_RECON) {
+ spin_unlock(&tcon->tc_lock);
+ return;
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 75f2c8734ff56..6ecbf48d0f0c6 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -210,7 +210,7 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
+
+ switch (match_token(value, cifs_secflavor_tokens, args)) {
+ case Opt_sec_krb5p:
+- cifs_errorf(fc, "sec=krb5p is not supported!\n");
++ cifs_errorf(fc, "sec=krb5p is not supported. Use sec=krb5,seal instead\n");
+ return 1;
+ case Opt_sec_krb5i:
+ ctx->sign = true;
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index d30ea2005eb36..e23cd216bffbe 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -299,14 +299,16 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
+ }
+
+ static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+- SEARCH_ID_FULL_DIR_INFO *info,
++ const void *info,
+ struct cifs_sb_info *cifs_sb)
+ {
++ const FILE_FULL_DIRECTORY_INFO *di = info;
++
+ __dir_info_to_fattr(fattr, info);
+
+- /* See MS-FSCC 2.4.19 FileIdFullDirectoryInformation */
++ /* See MS-FSCC 2.4.14, 2.4.19 */
+ if (fattr->cf_cifsattrs & ATTR_REPARSE)
+- fattr->cf_cifstag = le32_to_cpu(info->EaSize);
++ fattr->cf_cifstag = le32_to_cpu(di->EaSize);
+ cifs_fill_common_info(fattr, cifs_sb);
+ }
+
+@@ -420,7 +422,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ } else /* not srvinos - BB fixme add check for backlevel? */ {
+- cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
++ cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
+ }
+
+ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+@@ -1014,10 +1016,9 @@ static int cifs_filldir(char *find_entry, struct file *file,
+ (FIND_FILE_STANDARD_INFO *)find_entry,
+ cifs_sb);
+ break;
++ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+ case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+- cifs_fulldir_info_to_fattr(&fattr,
+- (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+- cifs_sb);
++ cifs_fulldir_info_to_fattr(&fattr, find_entry, cifs_sb);
+ break;
+ default:
+ cifs_dir_info_to_fattr(&fattr,
+diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
+index a20a5d0836dc9..2fc2fbb260bf0 100644
+--- a/fs/smb/client/sess.c
++++ b/fs/smb/client/sess.c
+@@ -75,6 +75,10 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
+ {
+ unsigned int i;
+
++ /* if the channel is waiting for termination */
++ if (server && server->terminate)
++ return CIFS_INVAL_CHAN_INDEX;
++
+ for (i = 0; i < ses->chan_count; i++) {
+ if (ses->chans[i].server == server)
+ return i;
+@@ -84,7 +88,6 @@ cifs_ses_get_chan_index(struct cifs_ses *ses,
+ if (server)
+ cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
+ server->conn_id);
+- WARN_ON(1);
+ return CIFS_INVAL_CHAN_INDEX;
+ }
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 5d9c87d2e1e01..9d34a55fdb5e4 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -178,6 +178,7 @@ cifs_chan_skip_or_disable(struct cifs_ses *ses,
+ }
+
+ ses->chans[chan_index].server = NULL;
++ server->terminate = true;
+ spin_unlock(&ses->chan_lock);
+
+ /*
+@@ -188,7 +189,6 @@ cifs_chan_skip_or_disable(struct cifs_ses *ses,
+ */
+ cifs_put_tcp_session(server, from_reconnect);
+
+- server->terminate = true;
+ cifs_signal_cifsd_for_reconnect(server, false);
+
+ /* mark primary server as needing reconnect */
+@@ -399,6 +399,15 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ goto out;
+ }
+
++ spin_lock(&ses->ses_lock);
++ if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
++ spin_unlock(&ses->ses_lock);
++ mutex_unlock(&ses->session_mutex);
++ goto skip_add_channels;
++ }
++ ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
++ spin_unlock(&ses->ses_lock);
++
+ if (!rc &&
+ (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ mutex_unlock(&ses->session_mutex);
+@@ -428,15 +437,22 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ if (ses->chan_max > ses->chan_count &&
+ ses->iface_count &&
+ !SERVER_IS_CHAN(server)) {
+- if (ses->chan_count == 1)
++ if (ses->chan_count == 1) {
+ cifs_server_dbg(VFS, "supports multichannel now\n");
++ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
++ (SMB_INTERFACE_POLL_INTERVAL * HZ));
++ }
+
+ cifs_try_adding_channels(ses);
+ }
+ } else {
+ mutex_unlock(&ses->session_mutex);
+ }
++
+ skip_add_channels:
++ spin_lock(&ses->ses_lock);
++ ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
++ spin_unlock(&ses->ses_lock);
+
+ if (smb2_command != SMB2_INTERNAL_CMD)
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+@@ -5076,6 +5092,9 @@ int SMB2_query_directory_init(const unsigned int xid,
+ case SMB_FIND_FILE_POSIX_INFO:
+ req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
+ break;
++ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++ req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
++ break;
+ default:
+ cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ info_level);
+@@ -5145,6 +5164,9 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
+ /* note that posix payload are variable size */
+ info_buf_size = sizeof(struct smb2_posix_info);
+ break;
++ case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
++ info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
++ break;
+ default:
+ cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
+ srch_inf->info_level);
+diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
+index 4f717ad7c21b4..994d701934329 100644
+--- a/fs/smb/client/transport.c
++++ b/fs/smb/client/transport.c
+@@ -400,10 +400,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ server->conn_id, server->hostname);
+ }
+ smbd_done:
+- if (rc < 0 && rc != -EINTR)
++ /*
++ * there's hardly any use for the layers above to know the
++ * actual error code here. All they should do at this point is
++ * to retry the connection and hope it goes away.
++ */
++ if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
+ cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
+ rc);
+- else if (rc > 0)
++ rc = -ECONNABORTED;
++ cifs_signal_cifsd_for_reconnect(server, false);
++ } else if (rc > 0)
+ rc = 0;
+ out:
+ cifs_in_send_dec(server);
+@@ -428,8 +435,8 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ if (!(flags & CIFS_TRANSFORM_REQ))
+ return __smb_send_rqst(server, num_rqst, rqst);
+
+- if (num_rqst > MAX_COMPOUND - 1)
+- return -ENOMEM;
++ if (WARN_ON_ONCE(num_rqst > MAX_COMPOUND - 1))
++ return -EIO;
+
+ if (!server->ops->init_transform_rq) {
+ cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
+@@ -1026,6 +1033,9 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+ if (!server || server->terminate)
+ continue;
+
++ if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
++ continue;
++
+ /*
+ * strictly speaking, we should pick up req_lock to read
+ * server->in_flight. But it shouldn't matter much here if we
+diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
+index bf9823956758c..f703fb8030de2 100644
+--- a/include/linux/ceph/osd_client.h
++++ b/include/linux/ceph/osd_client.h
+@@ -45,6 +45,7 @@ enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
++ CEPH_SPARSE_READ_DATA_PRE,
+ CEPH_SPARSE_READ_DATA,
+ };
+
+@@ -64,7 +65,7 @@ struct ceph_sparse_read {
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+- __le32 sr_datalen; /* length of actual data */
++ u32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index d08b97dacd2d9..6e558264b4ab1 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -352,6 +352,8 @@ enum rw_hint {
+ * unrelated IO (like cache flushing, new IO generation, etc).
+ */
+ #define IOCB_DIO_CALLER_COMP (1 << 22)
++/* kiocb is a read or write operation submitted by fs/aio.c. */
++#define IOCB_AIO_RW (1 << 23)
+
+ /* for use in trace events */
+ #define TRACE_IOCB_STRINGS \
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 1c1072e3ca063..ed57c23f80ac2 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -118,6 +118,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
+ int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+ #endif
+ void memblock_trim_memory(phys_addr_t align);
++unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
++ phys_addr_t base2, phys_addr_t size2);
+ bool memblock_overlaps_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size);
+ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 51eb83f779388..643e9ba4e64bd 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1102,7 +1102,7 @@ struct mlx5_ifc_roce_cap_bits {
+ u8 sw_r_roce_src_udp_port[0x1];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 fl_rc_qp_when_roce_enabled[0x1];
+- u8 reserved_at_7[0x1];
++ u8 roce_cc_general[0x1];
+ u8 qp_ooo_transmit_default[0x1];
+ u8 reserved_at_9[0x15];
+ u8 qp_ts_format[0x2];
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 493487ed7c388..cb25db2a93dd1 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -552,6 +552,11 @@ static inline int swap_duplicate(swp_entry_t swp)
+ return 0;
+ }
+
++static inline int swapcache_prepare(swp_entry_t swp)
++{
++ return 0;
++}
++
+ static inline void swap_free(swp_entry_t swp)
+ {
+ }
+diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
+index c48186bf47372..21da31e1dff5d 100644
+--- a/include/net/ipv6_stubs.h
++++ b/include/net/ipv6_stubs.h
+@@ -85,6 +85,11 @@ struct ipv6_bpf_stub {
+ sockptr_t optval, unsigned int optlen);
+ int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
+ sockptr_t optval, sockptr_t optlen);
++ int (*ipv6_dev_get_saddr)(struct net *net,
++ const struct net_device *dst_dev,
++ const struct in6_addr *daddr,
++ unsigned int prefs,
++ struct in6_addr *saddr);
+ };
+ extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
+
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 692d5955911c7..4a767b3d20b9d 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -275,7 +275,7 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+ }
+
+ void flow_offload_route_init(struct flow_offload *flow,
+- const struct nf_flow_route *route);
++ struct nf_flow_route *route);
+
+ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+ void flow_offload_refresh(struct nf_flowtable *flow_table,
+diff --git a/include/net/switchdev.h b/include/net/switchdev.h
+index a43062d4c734b..8346b0d29542c 100644
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -308,6 +308,9 @@ void switchdev_deferred_process(void);
+ int switchdev_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack);
++bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
++ enum switchdev_notifier_type nt,
++ const struct switchdev_obj *obj);
+ int switchdev_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 9cbcfb3c95dac..a3840a2749c19 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2343,7 +2343,7 @@ struct tcp_ulp_ops {
+ /* cleanup ulp */
+ void (*release)(struct sock *sk);
+ /* diagnostic */
+- int (*get_info)(const struct sock *sk, struct sk_buff *skb);
++ int (*get_info)(struct sock *sk, struct sk_buff *skb);
+ size_t (*get_info_size)(const struct sock *sk);
+ /* clone ulp */
+ void (*clone)(const struct request_sock *req, struct sock *newsk,
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 8fa1153f37cbf..9c8b6f611330c 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -100,10 +100,6 @@ struct scsi_vpd {
+ unsigned char data[];
+ };
+
+-enum scsi_vpd_parameters {
+- SCSI_VPD_HEADER_SIZE = 4,
+-};
+-
+ struct scsi_device {
+ struct Scsi_Host *host;
+ struct request_queue *request_queue;
+@@ -208,6 +204,7 @@ struct scsi_device {
+ unsigned use_10_for_rw:1; /* first try 10-byte read / write */
+ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
++ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */
+ unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
+ unsigned no_write_same:1; /* no WRITE SAME command */
+ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index ada7acb91a1b7..366df8a1a5fc8 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -3257,6 +3257,11 @@ union bpf_attr {
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
++ * **BPF_FIB_LOOKUP_SRC**
++ * Derive and set source IP addr in *params*->ipv{4,6}_src
++ * for the nexthop. If the src addr cannot be derived,
++ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
++ * case, *params*->dmac and *params*->smac are not set either.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+@@ -6956,6 +6961,7 @@ enum {
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
++ BPF_FIB_LOOKUP_SRC = (1U << 4),
+ };
+
+ enum {
+@@ -6968,6 +6974,7 @@ enum {
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
++ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
+ };
+
+ struct bpf_fib_lookup {
+@@ -7002,6 +7009,9 @@ struct bpf_fib_lookup {
+ __u32 rt_metric;
+ };
+
++ /* input: source address to consider for lookup
++ * output: source address result from lookup
++ */
+ union {
+ __be32 ipv4_src;
+ __u32 ipv6_src[4]; /* in6_addr; network order */
+diff --git a/include/xen/events.h b/include/xen/events.h
+index 23932b0673dc7..7488cd51fbf4f 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -101,8 +101,8 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
+
+ /* Determine the IRQ which is bound to an event channel */
+ unsigned int irq_from_evtchn(evtchn_port_t evtchn);
+-int irq_from_virq(unsigned int cpu, unsigned int virq);
+-evtchn_port_t evtchn_from_irq(unsigned irq);
++int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
++ evtchn_port_t *evtchn);
+
+ int xen_set_callback_via(uint64_t via);
+ int xen_evtchn_do_upcall(void);
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index e68ef39cda675..a5ce840f4fbef 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1100,6 +1100,7 @@ struct bpf_hrtimer {
+ struct bpf_prog *prog;
+ void __rcu *callback_fn;
+ void *value;
++ struct rcu_head rcu;
+ };
+
+ /* the actual struct hidden inside uapi struct bpf_timer */
+@@ -1328,6 +1329,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
+
+ if (in_nmi())
+ return -EOPNOTSUPP;
++ rcu_read_lock();
+ __bpf_spin_lock_irqsave(&timer->lock);
+ t = timer->timer;
+ if (!t) {
+@@ -1349,6 +1351,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
+ * if it was running.
+ */
+ ret = ret ?: hrtimer_cancel(&t->timer);
++ rcu_read_unlock();
+ return ret;
+ }
+
+@@ -1403,7 +1406,7 @@ void bpf_timer_cancel_and_free(void *val)
+ */
+ if (this_cpu_read(hrtimer_running) != t)
+ hrtimer_cancel(&t->timer);
+- kfree(t);
++ kfree_rcu(t, rcu);
+ }
+
+ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 904dd85345973..4ac36eb4cdee5 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -37,6 +37,8 @@ static struct ctl_table sched_rt_sysctls[] = {
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_rt_handler,
++ .extra1 = SYSCTL_ONE,
++ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "sched_rt_runtime_us",
+@@ -44,6 +46,8 @@ static struct ctl_table sched_rt_sysctls[] = {
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = sched_rt_handler,
++ .extra1 = SYSCTL_NEG_ONE,
++ .extra2 = SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "sched_rr_timeslice_ms",
+@@ -2989,9 +2993,6 @@ static int sched_rt_global_constraints(void)
+ #ifdef CONFIG_SYSCTL
+ static int sched_rt_global_validate(void)
+ {
+- if (sysctl_sched_rt_period <= 0)
+- return -EINVAL;
+-
+ if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+ ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
+ ((u64)sysctl_sched_rt_runtime *
+@@ -3022,7 +3023,7 @@ static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+ old_period = sysctl_sched_rt_period;
+ old_runtime = sysctl_sched_rt_runtime;
+
+- ret = proc_dointvec(table, write, buffer, lenp, ppos);
++ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!ret && write) {
+ ret = sched_rt_global_validate();
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 1331f3186f2ad..d2f73bb4121b0 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -2225,6 +2225,7 @@ config TEST_DIV64
+ config TEST_IOV_ITER
+ tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
++ depends on MMU
+ default KUNIT_ALL_TESTS
+ help
+ Enable this to turn on testing of the operation of the I/O iterator
+diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
+index 3071e08e8b8f8..e84495ab92cf3 100644
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -183,9 +183,21 @@ static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
+ return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
+ }
+
++static void damon_lru_sort_copy_quota_status(struct damos_quota *dst,
++ struct damos_quota *src)
++{
++ dst->total_charged_sz = src->total_charged_sz;
++ dst->total_charged_ns = src->total_charged_ns;
++ dst->charged_sz = src->charged_sz;
++ dst->charged_from = src->charged_from;
++ dst->charge_target_from = src->charge_target_from;
++ dst->charge_addr_from = src->charge_addr_from;
++}
++
+ static int damon_lru_sort_apply_parameters(void)
+ {
+- struct damos *scheme;
++ struct damos *scheme, *hot_scheme, *cold_scheme;
++ struct damos *old_hot_scheme = NULL, *old_cold_scheme = NULL;
+ unsigned int hot_thres, cold_thres;
+ int err = 0;
+
+@@ -193,18 +205,35 @@ static int damon_lru_sort_apply_parameters(void)
+ if (err)
+ return err;
+
++ damon_for_each_scheme(scheme, ctx) {
++ if (!old_hot_scheme) {
++ old_hot_scheme = scheme;
++ continue;
++ }
++ old_cold_scheme = scheme;
++ }
++
+ hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
+ hot_thres_access_freq / 1000;
+- scheme = damon_lru_sort_new_hot_scheme(hot_thres);
+- if (!scheme)
++ hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
++ if (!hot_scheme)
+ return -ENOMEM;
+- damon_set_schemes(ctx, &scheme, 1);
++ if (old_hot_scheme)
++ damon_lru_sort_copy_quota_status(&hot_scheme->quota,
++ &old_hot_scheme->quota);
+
+ cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
+- scheme = damon_lru_sort_new_cold_scheme(cold_thres);
+- if (!scheme)
++ cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
++ if (!cold_scheme) {
++ damon_destroy_scheme(hot_scheme);
+ return -ENOMEM;
+- damon_add_scheme(ctx, scheme);
++ }
++ if (old_cold_scheme)
++ damon_lru_sort_copy_quota_status(&cold_scheme->quota,
++ &old_cold_scheme->quota);
++
++ damon_set_schemes(ctx, &hot_scheme, 1);
++ damon_add_scheme(ctx, cold_scheme);
+
+ return damon_set_region_biggest_system_ram_default(target,
+ &monitor_region_start,
+diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
+index 648d2a85523ab..eca9d000ecc53 100644
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -148,9 +148,20 @@ static struct damos *damon_reclaim_new_scheme(void)
+ &damon_reclaim_wmarks);
+ }
+
++static void damon_reclaim_copy_quota_status(struct damos_quota *dst,
++ struct damos_quota *src)
++{
++ dst->total_charged_sz = src->total_charged_sz;
++ dst->total_charged_ns = src->total_charged_ns;
++ dst->charged_sz = src->charged_sz;
++ dst->charged_from = src->charged_from;
++ dst->charge_target_from = src->charge_target_from;
++ dst->charge_addr_from = src->charge_addr_from;
++}
++
+ static int damon_reclaim_apply_parameters(void)
+ {
+- struct damos *scheme;
++ struct damos *scheme, *old_scheme;
+ struct damos_filter *filter;
+ int err = 0;
+
+@@ -162,6 +173,11 @@ static int damon_reclaim_apply_parameters(void)
+ scheme = damon_reclaim_new_scheme();
+ if (!scheme)
+ return -ENOMEM;
++ if (!list_empty(&ctx->schemes)) {
++ damon_for_each_scheme(old_scheme, ctx)
++ damon_reclaim_copy_quota_status(&scheme->quota,
++ &old_scheme->quota);
++ }
+ if (skip_anon) {
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+ if (!filter) {
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 6d18485571b4a..d630f5c2bdb90 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -180,8 +180,9 @@ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
+ /*
+ * Address comparison utilities
+ */
+-static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+- phys_addr_t base2, phys_addr_t size2)
++unsigned long __init_memblock
++memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
++ phys_addr_t size2)
+ {
+ return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 8a881ab21f6cb..dd854cc65fd9d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -7613,9 +7613,13 @@ bool mem_cgroup_swap_full(struct folio *folio)
+
+ static int __init setup_swap_account(char *s)
+ {
+- pr_warn_once("The swapaccount= commandline option is deprecated. "
+- "Please report your usecase to linux-mm@kvack.org if you "
+- "depend on this functionality.\n");
++ bool res;
++
++ if (!kstrtobool(s, &res) && !res)
++ pr_warn_once("The swapaccount=0 commandline option is deprecated "
++ "in favor of configuring swap control via cgroupfs. "
++ "Please report your usecase to linux-mm@kvack.org if you "
++ "depend on this functionality.\n");
+ return 1;
+ }
+ __setup("swapaccount=", setup_swap_account);
+diff --git a/mm/memory.c b/mm/memory.c
+index b3be18f1f1206..78e05d3e9e4ac 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3726,6 +3726,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ struct page *page;
+ struct swap_info_struct *si = NULL;
+ rmap_t rmap_flags = RMAP_NONE;
++ bool need_clear_cache = false;
+ bool exclusive = false;
+ swp_entry_t entry;
+ pte_t pte;
+@@ -3794,6 +3795,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ if (!folio) {
+ if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
+ __swap_count(entry) == 1) {
++ /*
++ * Prevent parallel swapin from proceeding with
++ * the cache flag. Otherwise, another thread may
++ * finish swapin first, free the entry, and swapout
++ * reusing the same entry. It's undetectable as
++ * pte_same() returns true due to entry reuse.
++ */
++ if (swapcache_prepare(entry)) {
++ /* Relax a bit to prevent rapid repeated page faults */
++ schedule_timeout_uninterruptible(1);
++ goto out;
++ }
++ need_clear_cache = true;
++
+ /* skip swapcache */
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
+ vma, vmf->address, false);
+@@ -4040,6 +4055,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ if (vmf->pte)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ out:
++ /* Clear the swap cache pin for direct swapin after PTL unlock */
++ if (need_clear_cache)
++ swapcache_clear(si, entry);
+ if (si)
+ put_swap_device(si);
+ return ret;
+@@ -4054,6 +4072,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ folio_unlock(swapcache);
+ folio_put(swapcache);
+ }
++ if (need_clear_cache)
++ swapcache_clear(si, entry);
+ if (si)
+ put_swap_device(si);
+ return ret;
+diff --git a/mm/swap.h b/mm/swap.h
+index 8a3c7a0ace4f0..693d1b2815598 100644
+--- a/mm/swap.h
++++ b/mm/swap.h
+@@ -38,6 +38,7 @@ void __delete_from_swap_cache(struct folio *folio,
+ void delete_from_swap_cache(struct folio *folio);
+ void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end);
++void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
+ struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr);
+ struct folio *filemap_get_incore_folio(struct address_space *mapping,
+@@ -96,6 +97,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+ return 0;
+ }
+
++static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
++{
++}
++
+ static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
+ {
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index e52f486834ebf..750314fff0c46 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -3362,6 +3362,19 @@ int swapcache_prepare(swp_entry_t entry)
+ return __swap_duplicate(entry, SWAP_HAS_CACHE);
+ }
+
++void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
++{
++ struct swap_cluster_info *ci;
++ unsigned long offset = swp_offset(entry);
++ unsigned char usage;
++
++ ci = lock_cluster_or_swap_info(si, offset);
++ usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
++ unlock_cluster_or_swap_info(si, ci);
++ if (!usage)
++ free_swap_slot(entry);
++}
++
+ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+ {
+ return swap_type_to_swap_info(swp_type(entry));
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 37d2b1cb2ecb4..69681b9173fdc 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -1100,6 +1100,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
+ if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
+ spin_unlock(&tree->lock);
+ delete_from_swap_cache(page_folio(page));
++ unlock_page(page);
++ put_page(page);
+ ret = -ENOMEM;
+ goto fail;
+ }
+@@ -1215,7 +1217,7 @@ bool zswap_store(struct folio *folio)
+ if (folio_test_large(folio))
+ return false;
+
+- if (!zswap_enabled || !tree)
++ if (!tree)
+ return false;
+
+ /*
+@@ -1231,6 +1233,9 @@ bool zswap_store(struct folio *folio)
+ }
+ spin_unlock(&tree->lock);
+
++ if (!zswap_enabled)
++ return false;
++
+ /*
+ * XXX: zswap reclaim does not work with cgroups yet. Without a
+ * cgroup-aware entry LRU, we will push out entries system-wide based on
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index ee84e783e1dff..7b41ee8740cbb 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -595,21 +595,40 @@ br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
+ }
+
+ static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
++ struct net_device *dev,
++ unsigned long action,
+ enum switchdev_obj_id id,
+ const struct net_bridge_mdb_entry *mp,
+ struct net_device *orig_dev)
+ {
+- struct switchdev_obj_port_mdb *mdb;
++ struct switchdev_obj_port_mdb mdb = {
++ .obj = {
++ .id = id,
++ .orig_dev = orig_dev,
++ },
++ };
++ struct switchdev_obj_port_mdb *pmdb;
+
+- mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
+- if (!mdb)
+- return -ENOMEM;
++ br_switchdev_mdb_populate(&mdb, mp);
++
++ if (action == SWITCHDEV_PORT_OBJ_ADD &&
++ switchdev_port_obj_act_is_deferred(dev, action, &mdb.obj)) {
++ /* This event is already in the deferred queue of
++ * events, so this replay must be elided, lest the
++ * driver receives duplicate events for it. This can
++ * only happen when replaying additions, since
++ * modifications are always immediately visible in
++ * br->mdb_list, whereas actual event delivery may be
++ * delayed.
++ */
++ return 0;
++ }
+
+- mdb->obj.id = id;
+- mdb->obj.orig_dev = orig_dev;
+- br_switchdev_mdb_populate(mdb, mp);
+- list_add_tail(&mdb->obj.list, mdb_list);
++ pmdb = kmemdup(&mdb, sizeof(mdb), GFP_ATOMIC);
++ if (!pmdb)
++ return -ENOMEM;
+
++ list_add_tail(&pmdb->obj.list, mdb_list);
+ return 0;
+ }
+
+@@ -677,51 +696,50 @@ br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ return 0;
+
+- /* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
+- * because the write-side protection is br->multicast_lock. But we
+- * need to emulate the [ blocking ] calling context of a regular
+- * switchdev event, so since both br->multicast_lock and RCU read side
+- * critical sections are atomic, we have no choice but to pick the RCU
+- * read side lock, queue up all our events, leave the critical section
+- * and notify switchdev from blocking context.
++ if (adding)
++ action = SWITCHDEV_PORT_OBJ_ADD;
++ else
++ action = SWITCHDEV_PORT_OBJ_DEL;
++
++ /* br_switchdev_mdb_queue_one() will take care to not queue a
++ * replay of an event that is already pending in the switchdev
++ * deferred queue. In order to safely determine that, there
++ * must be no new deferred MDB notifications enqueued for the
++ * duration of the MDB scan. Therefore, grab the write-side
++ * lock to avoid racing with any concurrent IGMP/MLD snooping.
+ */
+- rcu_read_lock();
++ spin_lock_bh(&br->multicast_lock);
+
+- hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
++ hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
+ struct net_bridge_port_group __rcu * const *pp;
+ const struct net_bridge_port_group *p;
+
+ if (mp->host_joined) {
+- err = br_switchdev_mdb_queue_one(&mdb_list,
++ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
+ SWITCHDEV_OBJ_ID_HOST_MDB,
+ mp, br_dev);
+ if (err) {
+- rcu_read_unlock();
++ spin_unlock_bh(&br->multicast_lock);
+ goto out_free_mdb;
+ }
+ }
+
+- for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
++ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+ pp = &p->next) {
+ if (p->key.port->dev != dev)
+ continue;
+
+- err = br_switchdev_mdb_queue_one(&mdb_list,
++ err = br_switchdev_mdb_queue_one(&mdb_list, dev, action,
+ SWITCHDEV_OBJ_ID_PORT_MDB,
+ mp, dev);
+ if (err) {
+- rcu_read_unlock();
++ spin_unlock_bh(&br->multicast_lock);
+ goto out_free_mdb;
+ }
+ }
+ }
+
+- rcu_read_unlock();
+-
+- if (adding)
+- action = SWITCHDEV_PORT_OBJ_ADD;
+- else
+- action = SWITCHDEV_PORT_OBJ_DEL;
++ spin_unlock_bh(&br->multicast_lock);
+
+ list_for_each_entry(obj, &mdb_list, list) {
+ err = br_switchdev_mdb_replay_one(nb, dev,
+@@ -786,6 +804,16 @@ static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
+ br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
+
+ br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
++
++ /* Make sure that the device leaving this bridge has seen all
++ * relevant events before it is disassociated. In the normal
++ * case, when the device is directly attached to the bridge,
++ * this is covered by del_nbp(). If the association was indirect
++ * however, e.g. via a team or bond, and the device is leaving
++ * that intermediate device, then the bridge port remains in
++ * place.
++ */
++ switchdev_deferred_process();
+ }
+
+ /* Let the bridge know that this port is offloaded, so that it can assign a
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 8d9760397b887..3babcd5e65e16 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -5856,8 +5856,8 @@ static int osd_sparse_read(struct ceph_connection *con,
+ struct ceph_osd *o = con->private;
+ struct ceph_sparse_read *sr = &o->o_sparse_read;
+ u32 count = sr->sr_count;
+- u64 eoff, elen;
+- int ret;
++ u64 eoff, elen, len = 0;
++ int i, ret;
+
+ switch (sr->sr_state) {
+ case CEPH_SPARSE_READ_HDR:
+@@ -5909,8 +5909,20 @@ static int osd_sparse_read(struct ceph_connection *con,
+ convert_extent_map(sr);
+ ret = sizeof(sr->sr_datalen);
+ *pbuf = (char *)&sr->sr_datalen;
+- sr->sr_state = CEPH_SPARSE_READ_DATA;
++ sr->sr_state = CEPH_SPARSE_READ_DATA_PRE;
+ break;
++ case CEPH_SPARSE_READ_DATA_PRE:
++ /* Convert sr_datalen to host-endian */
++ sr->sr_datalen = le32_to_cpu((__force __le32)sr->sr_datalen);
++ for (i = 0; i < count; i++)
++ len += sr->sr_extent[i].len;
++ if (sr->sr_datalen != len) {
++ pr_warn_ratelimited("data len %u != extent len %llu\n",
++ sr->sr_datalen, len);
++ return -EREMOTEIO;
++ }
++ sr->sr_state = CEPH_SPARSE_READ_DATA;
++ fallthrough;
+ case CEPH_SPARSE_READ_DATA:
+ if (sr->sr_index >= count) {
+ sr->sr_state = CEPH_SPARSE_READ_HDR;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 01f2417deef21..24f23a30c945e 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5903,6 +5903,9 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ params->rt_metric = res.fi->fib_priority;
+ params->ifindex = dev->ifindex;
+
++ if (flags & BPF_FIB_LOOKUP_SRC)
++ params->ipv4_src = fib_result_prefsrc(net, &res);
++
+ /* xdp and cls_bpf programs are run in RCU-bh so
+ * rcu_read_lock_bh is not needed here
+ */
+@@ -6045,6 +6048,18 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ params->rt_metric = res.f6i->fib6_metric;
+ params->ifindex = dev->ifindex;
+
++ if (flags & BPF_FIB_LOOKUP_SRC) {
++ if (res.f6i->fib6_prefsrc.plen) {
++ *src = res.f6i->fib6_prefsrc.addr;
++ } else {
++ err = ipv6_bpf_stub->ipv6_dev_get_saddr(net, dev,
++ &fl6.daddr, 0,
++ src);
++ if (err)
++ return BPF_FIB_LKUP_RET_NO_SRC_ADDR;
++ }
++ }
++
+ if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
+ goto set_fwd_params;
+
+@@ -6063,7 +6078,8 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ #endif
+
+ #define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
+- BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID)
++ BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID | \
++ BPF_FIB_LOOKUP_SRC)
+
+ BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
+ struct bpf_fib_lookup *, params, int, plen, u32, flags)
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 93ecfceac1bc4..4d75ef9d24bfa 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1226,8 +1226,11 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+- if (psock)
+- psock->saved_data_ready(sk);
++ if (psock) {
++ read_lock_bh(&sk->sk_callback_lock);
++ sk_psock_data_ready(sk, psock);
++ read_unlock_bh(&sk->sk_callback_lock);
++ }
+ rcu_read_unlock();
+ }
+ }
+diff --git a/net/devlink/core.c b/net/devlink/core.c
+index 6cec4afb01fbd..451f2bc141a05 100644
+--- a/net/devlink/core.c
++++ b/net/devlink/core.c
+@@ -308,14 +308,20 @@ static int __init devlink_init(void)
+ {
+ int err;
+
+- err = genl_register_family(&devlink_nl_family);
+- if (err)
+- goto out;
+ err = register_pernet_subsys(&devlink_pernet_ops);
+ if (err)
+ goto out;
++ err = genl_register_family(&devlink_nl_family);
++ if (err)
++ goto out_unreg_pernet_subsys;
+ err = register_netdevice_notifier(&devlink_port_netdevice_nb);
++ if (!err)
++ return 0;
++
++ genl_unregister_family(&devlink_nl_family);
+
++out_unreg_pernet_subsys:
++ unregister_pernet_subsys(&devlink_pernet_ops);
+ out:
+ WARN_ON(err);
+ return err;
+diff --git a/net/devlink/port.c b/net/devlink/port.c
+index 91ba1ca0f3553..9b5ff0fccefdd 100644
+--- a/net/devlink/port.c
++++ b/net/devlink/port.c
+@@ -574,7 +574,7 @@ devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+
+ xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
+ err = devlink_nl_port_fill(msg, devlink_port,
+- DEVLINK_CMD_NEW,
++ DEVLINK_CMD_PORT_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags,
+ cb->extack);
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 9456f5bb35e5d..0d0d725b46ad0 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -1125,7 +1125,8 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
+ if (neigh) {
+ if (!(READ_ONCE(neigh->nud_state) & NUD_NOARP)) {
+ read_lock_bh(&neigh->lock);
+- memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
++ memcpy(r->arp_ha.sa_data, neigh->ha,
++ min(dev->addr_len, sizeof(r->arp_ha.sa_data_min)));
+ r->arp_flags = arp_state_to_flags(neigh);
+ read_unlock_bh(&neigh->lock);
+ r->arp_ha.sa_family = dev->type;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index ca0ff15dc8fa3..bc74f131fe4df 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1825,6 +1825,21 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb,
+ return err;
+ }
+
++/* Combine dev_addr_genid and dev_base_seq to detect changes.
++ */
++static u32 inet_base_seq(const struct net *net)
++{
++ u32 res = atomic_read(&net->ipv4.dev_addr_genid) +
++ net->dev_base_seq;
++
++ /* Must not return 0 (see nl_dump_check_consistent()).
++	 * Choose a value far away from 0.
++ */
++ if (!res)
++ res = 0x80000000;
++ return res;
++}
++
+ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ const struct nlmsghdr *nlh = cb->nlh;
+@@ -1876,8 +1891,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+ idx = 0;
+ head = &tgt_net->dev_index_head[h];
+ rcu_read_lock();
+- cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^
+- tgt_net->dev_base_seq;
++ cb->seq = inet_base_seq(tgt_net);
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+@@ -2278,8 +2292,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
+ idx = 0;
+ head = &net->dev_index_head[h];
+ rcu_read_lock();
+- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
+- net->dev_base_seq;
++ cb->seq = inet_base_seq(net);
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index a532f749e4778..9456bf9e2705b 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1131,10 +1131,33 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ return 0;
+
+ error:
++ if (sk_hashed(sk)) {
++ spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash);
++
++ sock_prot_inuse_add(net, sk->sk_prot, -1);
++
++ spin_lock(lock);
++ sk_nulls_del_node_init_rcu(sk);
++ spin_unlock(lock);
++
++ sk->sk_hash = 0;
++ inet_sk(sk)->inet_sport = 0;
++ inet_sk(sk)->inet_num = 0;
++
++ if (tw)
++ inet_twsk_bind_unhash(tw, hinfo);
++ }
++
+ spin_unlock(&head2->lock);
+ if (tb_created)
+ inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
+- spin_unlock_bh(&head->lock);
++ spin_unlock(&head->lock);
++
++ if (tw)
++ inet_twsk_deschedule_put(tw);
++
++ local_bh_enable();
++
+ return -ENOMEM;
+ }
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index b007d098ffe2e..7881446a46c4f 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -706,6 +706,22 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ return err;
+ }
+
++/* Combine dev_addr_genid and dev_base_seq to detect changes.
++ */
++static u32 inet6_base_seq(const struct net *net)
++{
++ u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
++ net->dev_base_seq;
++
++ /* Must not return 0 (see nl_dump_check_consistent()).
++	 * Choose a value far away from 0.
++ */
++ if (!res)
++ res = 0x80000000;
++ return res;
++}
++
++
+ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+ struct netlink_callback *cb)
+ {
+@@ -739,8 +755,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
+ idx = 0;
+ head = &net->dev_index_head[h];
+ rcu_read_lock();
+- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
+- net->dev_base_seq;
++ cb->seq = inet6_base_seq(net);
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+@@ -5358,7 +5373,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
+ }
+
+ rcu_read_lock();
+- cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
++ cb->seq = inet6_base_seq(tgt_net);
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &tgt_net->dev_index_head[h];
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 4375bfa4f6089..b9c50cceba568 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -1064,6 +1064,7 @@ static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
+ .udp6_lib_lookup = __udp6_lib_lookup,
+ .ipv6_setsockopt = do_ipv6_setsockopt,
+ .ipv6_getsockopt = do_ipv6_getsockopt,
++ .ipv6_dev_get_saddr = ipv6_dev_get_saddr,
+ };
+
+ static int __init inet6_init(void)
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 4952ae7924505..02e9ffb63af19 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -177,6 +177,8 @@ static bool ip6_parse_tlv(bool hopbyhop,
+ case IPV6_TLV_IOAM:
+ if (!ipv6_hop_ioam(skb, off))
+ return false;
++
++ nh = skb_network_header(skb);
+ break;
+ case IPV6_TLV_JUMBO:
+ if (!ipv6_hop_jumbo(skb, off))
+@@ -943,6 +945,14 @@ static bool ipv6_hop_ioam(struct sk_buff *skb, int optoff)
+ if (!skb_valid_dst(skb))
+ ip6_route_input(skb);
+
++ /* About to mangle packet header */
++ if (skb_ensure_writable(skb, optoff + 2 + hdr->opt_len))
++ goto drop;
++
++ /* Trace pointer may have changed */
++ trace = (struct ioam6_trace_hdr *)(skb_network_header(skb)
++ + optoff + sizeof(*hdr));
++
+ ioam6_fill_trace_data(skb, ns, trace, true);
+ break;
+ default:
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index 29346a6eec9ff..35508abd76f43 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -512,22 +512,24 @@ int __init seg6_init(void)
+ {
+ int err;
+
+- err = genl_register_family(&seg6_genl_family);
++ err = register_pernet_subsys(&ip6_segments_ops);
+ if (err)
+ goto out;
+
+- err = register_pernet_subsys(&ip6_segments_ops);
++ err = genl_register_family(&seg6_genl_family);
+ if (err)
+- goto out_unregister_genl;
++ goto out_unregister_pernet;
+
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ err = seg6_iptunnel_init();
+ if (err)
+- goto out_unregister_pernet;
++ goto out_unregister_genl;
+
+ err = seg6_local_init();
+- if (err)
+- goto out_unregister_pernet;
++ if (err) {
++ seg6_iptunnel_exit();
++ goto out_unregister_genl;
++ }
+ #endif
+
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+@@ -548,11 +550,11 @@ int __init seg6_init(void)
+ #endif
+ #endif
+ #ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+-out_unregister_pernet:
+- unregister_pernet_subsys(&ip6_segments_ops);
+-#endif
+ out_unregister_genl:
+ genl_unregister_family(&seg6_genl_family);
++#endif
++out_unregister_pernet:
++ unregister_pernet_subsys(&ip6_segments_ops);
+ goto out;
+ }
+
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 11f3d375cec00..db4971d52802b 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -627,7 +627,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+
+ back_from_confirm:
+ lock_sock(sk);
+- ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
++ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
+ err = ip6_append_data(sk, ip_generic_getfrag, msg,
+ ulen, transhdrlen, &ipc6,
+ &fl6, (struct rt6_info *)dst,
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index f7cb50b0dd4ed..daf5212e283dd 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1887,6 +1887,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
+ sband->band);
+ }
+
++ ieee80211_sta_set_rx_nss(link_sta);
++
+ return ret;
+ }
+
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index d9e716f38b0e9..c6044ab4e7fc1 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -7800,6 +7800,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ ieee80211_report_disconnect(sdata, frame_buf,
+ sizeof(frame_buf), true,
+ req->reason_code, false);
++ drv_mgd_complete_tx(sdata->local, sdata, &info);
+ return 0;
+ }
+
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 68ec2124c3db5..a52813f2b08cb 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -9,7 +9,7 @@
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2013-2015 Intel Mobile Communications GmbH
+ * Copyright 2016-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2023 Intel Corporation
++ * Copyright (C) 2018-2024 Intel Corporation
+ */
+
+ #include <linux/if_arp.h>
+@@ -222,14 +222,18 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
+ }
+
+ static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata,
++ struct ieee80211_channel *channel,
+ u32 scan_flags, const u8 *da)
+ {
+ if (!sdata)
+ return false;
+- /* accept broadcast for OCE */
+- if (scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP &&
+- is_broadcast_ether_addr(da))
++
++ /* accept broadcast on 6 GHz and for OCE */
++ if (is_broadcast_ether_addr(da) &&
++ (channel->band == NL80211_BAND_6GHZ ||
++ scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP))
+ return true;
++
+ if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
+ return true;
+ return ether_addr_equal(da, sdata->vif.addr);
+@@ -278,6 +282,12 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0);
+ }
+
++ channel = ieee80211_get_channel_khz(local->hw.wiphy,
++ ieee80211_rx_status_to_khz(rx_status));
++
++ if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
++ return;
++
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ struct cfg80211_scan_request *scan_req;
+ struct cfg80211_sched_scan_request *sched_scan_req;
+@@ -295,19 +305,15 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
+ /* ignore ProbeResp to foreign address or non-bcast (OCE)
+ * unless scanning with randomised address
+ */
+- if (!ieee80211_scan_accept_presp(sdata1, scan_req_flags,
++ if (!ieee80211_scan_accept_presp(sdata1, channel,
++ scan_req_flags,
+ mgmt->da) &&
+- !ieee80211_scan_accept_presp(sdata2, sched_scan_req_flags,
++ !ieee80211_scan_accept_presp(sdata2, channel,
++ sched_scan_req_flags,
+ mgmt->da))
+ return;
+ }
+
+- channel = ieee80211_get_channel_khz(local->hw.wiphy,
+- ieee80211_rx_status_to_khz(rx_status));
+-
+- if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+- return;
+-
+ bss = ieee80211_bss_info_update(local, rx_status,
+ mgmt, skb->len,
+ channel);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index e112300caaf75..c61eb867bb4a7 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -914,6 +914,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_accept_plinks_update(sdata);
+
++ ieee80211_check_fast_xmit(sta);
++
+ return 0;
+ out_remove:
+ if (sta->sta.valid_links)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 5ab9594ae119e..5c6c5254d987f 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3034,7 +3034,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
+ sdata->vif.type == NL80211_IFTYPE_STATION)
+ goto out;
+
+- if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
++ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
+ goto out;
+
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index 7a47a58aa54b4..6218dcd07e184 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -663,7 +663,7 @@ struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk,
+ spin_unlock_irqrestore(&mns->keys_lock, flags);
+
+ if (!tagbits) {
+- kfree(key);
++ mctp_key_unref(key);
+ return ERR_PTR(-EBUSY);
+ }
+
+diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c
+index a536586742f28..6ff6f14674aa2 100644
+--- a/net/mptcp/diag.c
++++ b/net/mptcp/diag.c
+@@ -13,17 +13,19 @@
+ #include <uapi/linux/mptcp.h>
+ #include "protocol.h"
+
+-static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
++static int subflow_get_info(struct sock *sk, struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *sf;
+ struct nlattr *start;
+ u32 flags = 0;
++ bool slow;
+ int err;
+
+ start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP);
+ if (!start)
+ return -EMSGSIZE;
+
++ slow = lock_sock_fast(sk);
+ rcu_read_lock();
+ sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
+ if (!sf) {
+@@ -63,17 +65,19 @@ static int subflow_get_info(const struct sock *sk, struct sk_buff *skb)
+ sf->map_data_len) ||
+ nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) ||
+ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) ||
+- nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, sf->local_id)) {
++ nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) {
+ err = -EMSGSIZE;
+ goto nla_failure;
+ }
+
+ rcu_read_unlock();
++ unlock_sock_fast(sk, slow);
+ nla_nest_end(skb, start);
+ return 0;
+
+ nla_failure:
+ rcu_read_unlock();
++ unlock_sock_fast(sk, slow);
+ nla_nest_cancel(skb, start);
+ return err;
+ }
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index 74698582a2859..ad28da655f8bc 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -59,13 +59,12 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
+ mptcp_data_unlock(sk);
+ }
+
+-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt)
++void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt)
+ {
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *skb;
+
+- mptcp_data_lock(sk);
+ skb = skb_peek_tail(&sk->sk_receive_queue);
+ if (skb) {
+ WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
+@@ -77,5 +76,4 @@ void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_
+ }
+
+ pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
+- mptcp_data_unlock(sk);
+ }
+diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
+index a0990c365a2ea..c30405e768337 100644
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -66,6 +66,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
+ SNMP_MIB_ITEM("RcvWndShared", MPTCP_MIB_RCVWNDSHARED),
+ SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
+ SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
++ SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
+ SNMP_MIB_SENTINEL
+ };
+
+diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
+index cae71d9472529..dd7fd1f246b5f 100644
+--- a/net/mptcp/mib.h
++++ b/net/mptcp/mib.h
+@@ -65,6 +65,7 @@ enum linux_mptcp_mib_field {
+ * conflict with another subflow while updating msk rcv wnd
+ */
+ MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
++ MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
+ __MPTCP_MIB_MAX
+ };
+
+@@ -95,4 +96,11 @@ static inline void __MPTCP_INC_STATS(struct net *net,
+ __SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+ }
+
++static inline void MPTCP_DEC_STATS(struct net *net,
++ enum linux_mptcp_mib_field field)
++{
++ if (likely(net->mib.mptcp_statistics))
++ SNMP_DEC_STATS(net->mib.mptcp_statistics, field);
++}
++
+ bool mptcp_mib_alloc(struct net *net);
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index d2527d189a799..e3e96a49f9229 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -962,9 +962,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ /* subflows are fully established as soon as we get any
+ * additional ack, including ADD_ADDR.
+ */
+- subflow->fully_established = 1;
+- WRITE_ONCE(msk->fully_established, true);
+- goto check_notify;
++ goto set_fully_established;
+ }
+
+ /* If the first established packet does not contain MP_CAPABLE + data
+@@ -986,7 +984,10 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ set_fully_established:
+ if (unlikely(!READ_ONCE(msk->pm.server_side)))
+ pr_warn_once("bogus mpc option on established client sk");
+- mptcp_subflow_fully_established(subflow, mp_opt);
++
++ mptcp_data_lock((struct sock *)msk);
++ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
++ mptcp_data_unlock((struct sock *)msk);
+
+ check_notify:
+ /* if the subflow is not already linked into the conn_list, we can't
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 3011bc378462b..4dd47a1fb9aa6 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -396,19 +396,6 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ }
+ }
+
+-static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
+- const struct mptcp_addr_info *addr)
+-{
+- int i;
+-
+- for (i = 0; i < nr; i++) {
+- if (addrs[i].id == addr->id)
+- return true;
+- }
+-
+- return false;
+-}
+-
+ /* Fill all the remote addresses into the array addrs[],
+ * and return the array size.
+ */
+@@ -440,18 +427,34 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk,
+ msk->pm.subflows++;
+ addrs[i++] = remote;
+ } else {
++ DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
++
++ /* Forbid creation of new subflows matching existing
++ * ones, possibly already created by incoming ADD_ADDR
++ */
++ bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1);
++ mptcp_for_each_subflow(msk, subflow)
++ if (READ_ONCE(subflow->local_id) == local->id)
++ __set_bit(subflow->remote_id, unavail_id);
++
+ mptcp_for_each_subflow(msk, subflow) {
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ remote_address((struct sock_common *)ssk, &addrs[i]);
+- addrs[i].id = subflow->remote_id;
++ addrs[i].id = READ_ONCE(subflow->remote_id);
+ if (deny_id0 && !addrs[i].id)
+ continue;
+
++ if (test_bit(addrs[i].id, unavail_id))
++ continue;
++
+ if (!mptcp_pm_addr_families_match(sk, local, &addrs[i]))
+ continue;
+
+- if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
+- msk->pm.subflows < subflows_max) {
++ if (msk->pm.subflows < subflows_max) {
++ /* forbid creating multiple address towards
++ * this id
++ */
++ __set_bit(addrs[i].id, unavail_id);
+ msk->pm.subflows++;
+ i++;
+ }
+@@ -799,18 +802,18 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ u8 remote_id = READ_ONCE(subflow->remote_id);
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+- u8 id = subflow->local_id;
++ u8 id = subflow_get_local_id(subflow);
+
+- if (rm_type == MPTCP_MIB_RMADDR && subflow->remote_id != rm_id)
++ if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ continue;
+ if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
+ continue;
+
+ pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u",
+ rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
+- i, rm_id, subflow->local_id, subflow->remote_id,
+- msk->mpc_endpoint_id);
++ i, rm_id, id, remote_id, msk->mpc_endpoint_id);
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, how);
+
+@@ -901,7 +904,8 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
+ }
+
+ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+- struct mptcp_pm_addr_entry *entry)
++ struct mptcp_pm_addr_entry *entry,
++ bool needs_id)
+ {
+ struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
+ unsigned int addr_max;
+@@ -949,7 +953,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ }
+ }
+
+- if (!entry->addr.id) {
++ if (!entry->addr.id && needs_id) {
+ find_next:
+ entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
+ MPTCP_PM_MAX_ADDR_ID + 1,
+@@ -960,7 +964,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
+ }
+ }
+
+- if (!entry->addr.id)
++ if (!entry->addr.id && needs_id)
+ goto out;
+
+ __set_bit(entry->addr.id, pernet->id_bitmap);
+@@ -1048,6 +1052,11 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ if (err)
+ return err;
+
++ /* We don't use mptcp_set_state() here because it needs to be called
++ * under the msk socket lock. For the moment, that will not bring
++ * anything more than only calling inet_sk_state_store(), because the
++ * old status is known (TCP_CLOSE).
++ */
+ inet_sk_state_store(newsk, TCP_LISTEN);
+ lock_sock(ssk);
+ err = __inet_listen_sk(ssk, backlog);
+@@ -1087,7 +1096,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
+ entry->ifindex = 0;
+ entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
+ entry->lsk = NULL;
+- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
++ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true);
+ if (ret < 0)
+ kfree(entry);
+
+@@ -1303,6 +1312,18 @@ static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
+ return 0;
+ }
+
++static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr,
++ struct genl_info *info)
++{
++ struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
++
++ if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
++ mptcp_pm_addr_policy, info->extack) &&
++ tb[MPTCP_PM_ADDR_ATTR_ID])
++ return true;
++ return false;
++}
++
+ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+@@ -1344,7 +1365,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+ goto out_free;
+ }
+ }
+- ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
++ ret = mptcp_pm_nl_append_new_local_addr(pernet, entry,
++ !mptcp_pm_has_addr_attr_id(attr, info));
+ if (ret < 0) {
+ GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret);
+ goto out_free;
+@@ -2000,7 +2022,7 @@ static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
+ if (WARN_ON_ONCE(!sf))
+ return -EINVAL;
+
+- if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
++ if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, subflow_get_local_id(sf)))
+ return -EMSGSIZE;
+
+ if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
+diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
+index c1717322c8922..ecd166ce047dd 100644
+--- a/net/mptcp/pm_userspace.c
++++ b/net/mptcp/pm_userspace.c
+@@ -26,7 +26,8 @@ void mptcp_free_local_addr_list(struct mptcp_sock *msk)
+ }
+
+ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+- struct mptcp_pm_addr_entry *entry)
++ struct mptcp_pm_addr_entry *entry,
++ bool needs_id)
+ {
+ DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+ struct mptcp_pm_addr_entry *match = NULL;
+@@ -41,7 +42,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) {
+ addr_match = mptcp_addresses_equal(&e->addr, &entry->addr, true);
+- if (addr_match && entry->addr.id == 0)
++ if (addr_match && entry->addr.id == 0 && needs_id)
+ entry->addr.id = e->addr.id;
+ id_match = (e->addr.id == entry->addr.id);
+ if (addr_match && id_match) {
+@@ -64,7 +65,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
+ }
+
+ *e = *entry;
+- if (!e->addr.id)
++ if (!e->addr.id && needs_id)
+ e->addr.id = find_next_zero_bit(id_bitmap,
+ MPTCP_PM_MAX_ADDR_ID + 1,
+ 1);
+@@ -153,7 +154,7 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
+ if (new_entry.addr.port == msk_sport)
+ new_entry.addr.port = 0;
+
+- return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
++ return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
+ }
+
+ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+@@ -195,7 +196,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ goto announce_err;
+ }
+
+- err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val);
++ err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "did not match address and id");
+ goto announce_err;
+@@ -219,6 +220,40 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+ return err;
+ }
+
++static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk,
++ struct genl_info *info)
++{
++ struct mptcp_rm_list list = { .nr = 0 };
++ struct mptcp_subflow_context *subflow;
++ struct sock *sk = (struct sock *)msk;
++ bool has_id_0 = false;
++ int err = -EINVAL;
++
++ lock_sock(sk);
++ mptcp_for_each_subflow(msk, subflow) {
++ if (subflow->local_id == 0) {
++ has_id_0 = true;
++ break;
++ }
++ }
++ if (!has_id_0) {
++ GENL_SET_ERR_MSG(info, "address with id 0 not found");
++ goto remove_err;
++ }
++
++ list.ids[list.nr++] = 0;
++
++ spin_lock_bh(&msk->pm.lock);
++ mptcp_pm_remove_addr(msk, &list);
++ spin_unlock_bh(&msk->pm.lock);
++
++ err = 0;
++
++remove_err:
++ release_sock(sk);
++ return err;
++}
++
+ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
+@@ -250,6 +285,11 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+ goto remove_err;
+ }
+
++ if (id_val == 0) {
++ err = mptcp_userspace_pm_remove_id_zero_address(msk, info);
++ goto remove_err;
++ }
++
+ lock_sock((struct sock *)msk);
+
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+@@ -333,7 +373,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ local.addr = addr_l;
+- err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
++ err = mptcp_userspace_pm_append_new_local_addr(msk, &local, false);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "did not match address and id");
+ goto create_err;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 9d4d5dbdbb53b..d4ee0a6bdc86c 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -99,7 +99,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
+ subflow->subflow_id = msk->subflow_id++;
+
+ /* This is the first subflow, always with id 0 */
+- subflow->local_id_valid = 1;
++ WRITE_ONCE(subflow->local_id, 0);
+ mptcp_sock_graft(msk->first, sk->sk_socket);
+ iput(SOCK_INODE(ssock));
+
+@@ -445,11 +445,11 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
+
+ switch (sk->sk_state) {
+ case TCP_FIN_WAIT1:
+- inet_sk_state_store(sk, TCP_FIN_WAIT2);
++ mptcp_set_state(sk, TCP_FIN_WAIT2);
+ break;
+ case TCP_CLOSING:
+ case TCP_LAST_ACK:
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ break;
+ }
+
+@@ -610,13 +610,13 @@ static bool mptcp_check_data_fin(struct sock *sk)
+
+ switch (sk->sk_state) {
+ case TCP_ESTABLISHED:
+- inet_sk_state_store(sk, TCP_CLOSE_WAIT);
++ mptcp_set_state(sk, TCP_CLOSE_WAIT);
+ break;
+ case TCP_FIN_WAIT1:
+- inet_sk_state_store(sk, TCP_CLOSING);
++ mptcp_set_state(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ break;
+ default:
+ /* Other states not expected */
+@@ -791,7 +791,7 @@ static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
+ */
+ ssk_state = inet_sk_state_load(ssk);
+ if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+- inet_sk_state_store(sk, ssk_state);
++ mptcp_set_state(sk, ssk_state);
+ WRITE_ONCE(sk->sk_err, -err);
+
+ /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+@@ -2470,7 +2470,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ inet_sk_state_load(msk->first) == TCP_CLOSE) {
+ if (sk->sk_state != TCP_ESTABLISHED ||
+ msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ mptcp_close_wake_up(sk);
+ } else {
+ mptcp_start_tout_timer(sk);
+@@ -2565,7 +2565,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+ WRITE_ONCE(sk->sk_err, ECONNRESET);
+ }
+
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+ smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+ set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
+@@ -2700,7 +2700,7 @@ static void mptcp_do_fastclose(struct sock *sk)
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ mptcp_for_each_subflow_safe(msk, subflow, tmp)
+ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
+ subflow, MPTCP_CF_FASTCLOSE);
+@@ -2877,6 +2877,24 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ release_sock(ssk);
+ }
+
++void mptcp_set_state(struct sock *sk, int state)
++{
++ int oldstate = sk->sk_state;
++
++ switch (state) {
++ case TCP_ESTABLISHED:
++ if (oldstate != TCP_ESTABLISHED)
++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
++ break;
++
++ default:
++ if (oldstate == TCP_ESTABLISHED)
++ MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
++ }
++
++ inet_sk_state_store(sk, state);
++}
++
+ static const unsigned char new_state[16] = {
+ /* current state: new state: action: */
+ [0 /* (Invalid) */] = TCP_CLOSE,
+@@ -2899,7 +2917,7 @@ static int mptcp_close_state(struct sock *sk)
+ int next = (int)new_state[sk->sk_state];
+ int ns = next & TCP_STATE_MASK;
+
+- inet_sk_state_store(sk, ns);
++ mptcp_set_state(sk, ns);
+
+ return next & TCP_ACTION_FIN;
+ }
+@@ -3017,7 +3035,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
+ mptcp_check_listen_stop(sk);
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ goto cleanup;
+ }
+
+@@ -3060,7 +3078,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+ * state, let's not keep resources busy for no reasons
+ */
+ if (subflows_alive == 0)
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+
+ sock_hold(sk);
+ pr_debug("msk=%p state=%d", sk, sk->sk_state);
+@@ -3126,7 +3144,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+ return -EBUSY;
+
+ mptcp_check_listen_stop(sk);
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+
+ mptcp_stop_rtx_timer(sk);
+ mptcp_stop_tout_timer(sk);
+@@ -3177,6 +3195,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ {
+ struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
++ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk;
+
+ if (!nsk)
+@@ -3213,11 +3232,12 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ /* this can't race with mptcp_close(), as the msk is
+ * not yet exposted to user-space
+ 	 * not yet exposed to user-space
+- inet_sk_state_store(nsk, TCP_ESTABLISHED);
++ mptcp_set_state(nsk, TCP_ESTABLISHED);
+
+ /* The msk maintain a ref to each subflow in the connections list */
+ WRITE_ONCE(msk->first, ssk);
+- list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
++ subflow = mptcp_subflow_ctx(ssk);
++ list_add(&subflow->node, &msk->conn_list);
+ sock_hold(ssk);
+
+ /* new mpc subflow takes ownership of the newly
+@@ -3232,6 +3252,9 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ __mptcp_propagate_sndbuf(nsk, ssk);
+
+ mptcp_rcv_space_init(msk, ssk);
++
++ if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
++ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ bh_unlock_sock(nsk);
+
+ /* note: the newly allocated socket refcount is 2 now */
+@@ -3507,10 +3530,6 @@ void mptcp_finish_connect(struct sock *ssk)
+ * accessing the field below
+ */
+ WRITE_ONCE(msk->local_key, subflow->local_key);
+- WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
+- WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+- WRITE_ONCE(msk->snd_una, msk->write_seq);
+- WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
+
+ mptcp_pm_new_connection(msk, ssk, 0);
+ }
+@@ -3668,7 +3687,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(ssk))
+ return PTR_ERR(ssk);
+
+- inet_sk_state_store(sk, TCP_SYN_SENT);
++ mptcp_set_state(sk, TCP_SYN_SENT);
+ subflow = mptcp_subflow_ctx(ssk);
+ #ifdef CONFIG_TCP_MD5SIG
+ /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -3718,7 +3737,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (unlikely(err)) {
+ /* avoid leaving a dangling token in an unconnected socket */
+ mptcp_token_destroy(msk);
+- inet_sk_state_store(sk, TCP_CLOSE);
++ mptcp_set_state(sk, TCP_CLOSE);
+ return err;
+ }
+
+@@ -3808,13 +3827,13 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ goto unlock;
+ }
+
+- inet_sk_state_store(sk, TCP_LISTEN);
++ mptcp_set_state(sk, TCP_LISTEN);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
+ lock_sock(ssk);
+ err = __inet_listen_sk(ssk, backlog);
+ release_sock(ssk);
+- inet_sk_state_store(sk, inet_sk_state_load(ssk));
++ mptcp_set_state(sk, inet_sk_state_load(ssk));
+
+ if (!err) {
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -3874,7 +3893,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ __mptcp_close_ssk(newsk, msk->first,
+ mptcp_subflow_ctx(msk->first), 0);
+ if (unlikely(list_is_singular(&msk->conn_list)))
+- inet_sk_state_store(newsk, TCP_CLOSE);
++ mptcp_set_state(newsk, TCP_CLOSE);
+ }
+ }
+ release_sock(newsk);
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 094d3fd47a92f..55536ac835d90 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -493,10 +493,9 @@ struct mptcp_subflow_context {
+ remote_key_valid : 1, /* received the peer key from */
+ disposable : 1, /* ctx can be free at ulp release time */
+ stale : 1, /* unable to snd/rcv data, do not use for xmit */
+- local_id_valid : 1, /* local_id is correctly initialized */
+ valid_csum_seen : 1, /* at least one csum validated */
+ is_mptfo : 1, /* subflow is doing TFO */
+- __unused : 9;
++ __unused : 10;
+ enum mptcp_data_avail data_avail;
+ bool scheduled;
+ u32 remote_nonce;
+@@ -507,7 +506,7 @@ struct mptcp_subflow_context {
+ u8 hmac[MPTCPOPT_HMAC_LEN]; /* MPJ subflow only */
+ u64 iasn; /* initial ack sequence number, MPC subflows only */
+ };
+- u8 local_id;
++ s16 local_id; /* if negative not initialized yet */
+ u8 remote_id;
+ u8 reset_seen:1;
+ u8 reset_transient:1;
+@@ -558,6 +557,7 @@ mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
+ {
+ memset(&subflow->reset, 0, sizeof(subflow->reset));
+ subflow->request_mptcp = 1;
++ WRITE_ONCE(subflow->local_id, -1);
+ }
+
+ static inline u64
+@@ -623,8 +623,9 @@ int mptcp_allow_join_id0(const struct net *net);
+ unsigned int mptcp_stale_loss_cnt(const struct net *net);
+ int mptcp_get_pm_type(const struct net *net);
+ const char *mptcp_get_scheduler(const struct net *net);
+-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt);
++void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
++ struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt);
+ bool __mptcp_retransmit_pending_data(struct sock *sk);
+ void mptcp_check_and_set_pending(struct sock *sk);
+ void __mptcp_push_pending(struct sock *sk, unsigned int flags);
+@@ -642,6 +643,7 @@ bool __mptcp_close(struct sock *sk, long timeout);
+ void mptcp_cancel_work(struct sock *sk);
+ void __mptcp_unaccepted_force_close(struct sock *sk);
+ void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
++void mptcp_set_state(struct sock *sk, int state);
+
+ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+ const struct mptcp_addr_info *b, bool use_port);
+@@ -937,8 +939,8 @@ void mptcp_event_pm_listener(const struct sock *ssk,
+ enum mptcp_event_type event);
+ bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
+
+-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt);
++void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt);
+ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+ struct request_sock *req);
+
+@@ -1006,6 +1008,15 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
+ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+
++static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
++{
++ int local_id = READ_ONCE(subflow->local_id);
++
++ if (local_id < 0)
++ return 0;
++ return local_id;
++}
++
+ void __init mptcp_pm_nl_init(void);
+ void mptcp_pm_nl_work(struct mptcp_sock *msk);
+ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 8c7e22a9a37bd..ab41700bee688 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -421,31 +421,26 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
+
+ void __mptcp_sync_state(struct sock *sk, int state)
+ {
++ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
++ struct sock *ssk = msk->first;
+
+- __mptcp_propagate_sndbuf(sk, msk->first);
++ subflow = mptcp_subflow_ctx(ssk);
++ __mptcp_propagate_sndbuf(sk, ssk);
+ if (!msk->rcvspace_init)
+- mptcp_rcv_space_init(msk, msk->first);
++ mptcp_rcv_space_init(msk, ssk);
++
+ if (sk->sk_state == TCP_SYN_SENT) {
+- inet_sk_state_store(sk, state);
++		/* subflow->idsn is always available in TCP_SYN_SENT state,
++ * even for the FASTOPEN scenarios
++ */
++ WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
++ WRITE_ONCE(msk->snd_nxt, msk->write_seq);
++ mptcp_set_state(sk, state);
+ sk->sk_state_change(sk);
+ }
+ }
+
+-static void mptcp_propagate_state(struct sock *sk, struct sock *ssk)
+-{
+- struct mptcp_sock *msk = mptcp_sk(sk);
+-
+- mptcp_data_lock(sk);
+- if (!sock_owned_by_user(sk)) {
+- __mptcp_sync_state(sk, ssk->sk_state);
+- } else {
+- msk->pending_state = ssk->sk_state;
+- __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
+- }
+- mptcp_data_unlock(sk);
+-}
+-
+ static void subflow_set_remote_key(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow,
+ const struct mptcp_options_received *mp_opt)
+@@ -467,6 +462,31 @@ static void subflow_set_remote_key(struct mptcp_sock *msk,
+ atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
+ }
+
++static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
++ struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt)
++{
++ struct mptcp_sock *msk = mptcp_sk(sk);
++
++ mptcp_data_lock(sk);
++ if (mp_opt) {
++ /* Options are available only in the non fallback cases
++ * avoid updating rx path fields otherwise
++ */
++ WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
++ WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
++ subflow_set_remote_key(msk, subflow, mp_opt);
++ }
++
++ if (!sock_owned_by_user(sk)) {
++ __mptcp_sync_state(sk, ssk->sk_state);
++ } else {
++ msk->pending_state = ssk->sk_state;
++ __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
++ }
++ mptcp_data_unlock(sk);
++}
++
+ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ {
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+@@ -501,10 +521,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ if (mp_opt.deny_join_id0)
+ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+ subflow->mp_capable = 1;
+- subflow_set_remote_key(msk, subflow, &mp_opt);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
+ mptcp_finish_connect(sk);
+- mptcp_propagate_state(parent, sk);
++ mptcp_propagate_state(parent, sk, subflow, &mp_opt);
+ } else if (subflow->request_join) {
+ u8 hmac[SHA256_DIGEST_SIZE];
+
+@@ -516,7 +535,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ subflow->backup = mp_opt.backup;
+ subflow->thmac = mp_opt.thmac;
+ subflow->remote_nonce = mp_opt.nonce;
+- subflow->remote_id = mp_opt.join_id;
++ WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
+ pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
+ subflow, subflow->thmac, subflow->remote_nonce,
+ subflow->backup);
+@@ -547,7 +566,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ }
+ } else if (mptcp_check_fallback(sk)) {
+ fallback:
+- mptcp_propagate_state(parent, sk);
++ mptcp_propagate_state(parent, sk, subflow, NULL);
+ }
+ return;
+
+@@ -558,8 +577,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+
+ static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
+ {
+- subflow->local_id = local_id;
+- subflow->local_id_valid = 1;
++ WARN_ON_ONCE(local_id < 0 || local_id > 255);
++ WRITE_ONCE(subflow->local_id, local_id);
+ }
+
+ static int subflow_chk_local_id(struct sock *sk)
+@@ -568,7 +587,7 @@ static int subflow_chk_local_id(struct sock *sk)
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ int err;
+
+- if (likely(subflow->local_id_valid))
++ if (likely(subflow->local_id >= 0))
+ return 0;
+
+ err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
+@@ -732,17 +751,16 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
+ kfree_rcu(ctx, rcu);
+ }
+
+-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+- const struct mptcp_options_received *mp_opt)
++void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
++ struct mptcp_subflow_context *subflow,
++ const struct mptcp_options_received *mp_opt)
+ {
+- struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+-
+ subflow_set_remote_key(msk, subflow, mp_opt);
+ subflow->fully_established = 1;
+ WRITE_ONCE(msk->fully_established, true);
+
+ if (subflow->is_mptfo)
+- mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
++ __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
+ }
+
+ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+@@ -835,7 +853,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ * mpc option
+ */
+ if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
+- mptcp_subflow_fully_established(ctx, &mp_opt);
+ mptcp_pm_fully_established(owner, child);
+ ctx->pm_notified = 1;
+ }
+@@ -1544,7 +1561,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ remote_token, local_id, remote_id);
+ subflow->remote_token = remote_token;
+- subflow->remote_id = remote_id;
++ WRITE_ONCE(subflow->remote_id, remote_id);
+ subflow->request_join = 1;
+ subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ subflow->subflow_id = msk->subflow_id++;
+@@ -1706,6 +1723,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
+ pr_debug("subflow=%p", ctx);
+
+ ctx->tcp_sock = sk;
++ WRITE_ONCE(ctx->local_id, -1);
+
+ return ctx;
+ }
+@@ -1739,7 +1757,7 @@ static void subflow_state_change(struct sock *sk)
+ mptcp_do_fallback(sk);
+ pr_fallback(msk);
+ subflow->conn_finished = 1;
+- mptcp_propagate_state(parent, sk);
++ mptcp_propagate_state(parent, sk, subflow, NULL);
+ }
+
+ /* as recvmsg() does not acquire the subflow socket for ssk selection
+@@ -1941,14 +1959,14 @@ static void subflow_ulp_clone(const struct request_sock *req,
+ new_ctx->idsn = subflow_req->idsn;
+
+ /* this is the first subflow, id is always 0 */
+- new_ctx->local_id_valid = 1;
++ subflow_set_local_id(new_ctx, 0);
+ } else if (subflow_req->mp_join) {
+ new_ctx->ssn_offset = subflow_req->ssn_offset;
+ new_ctx->mp_join = 1;
+ new_ctx->fully_established = 1;
+ new_ctx->remote_key_valid = 1;
+ new_ctx->backup = subflow_req->backup;
+- new_ctx->remote_id = subflow_req->remote_id;
++ WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
+ new_ctx->token = subflow_req->token;
+ new_ctx->thmac = subflow_req->thmac;
+
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index c6bd533983c1f..4cc97f971264e 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -283,7 +283,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ pr_debug("Setting vtag %x for secondary conntrack\n",
+ sh->vtag);
+ ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
+- } else {
++ } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) {
+ /* If it is a shutdown ack OOTB packet, we expect a return
+ shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
+ pr_debug("Setting vtag %x for new conn OOTB\n",
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 920a5a29ae1dc..a0571339239c4 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -87,12 +87,22 @@ static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
+ return 0;
+ }
+
++static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
++ enum flow_offload_tuple_dir dir)
++{
++ struct dst_entry *dst = route->tuple[dir].dst;
++
++ route->tuple[dir].dst = NULL;
++
++ return dst;
++}
++
+ static int flow_offload_fill_route(struct flow_offload *flow,
+- const struct nf_flow_route *route,
++ struct nf_flow_route *route,
+ enum flow_offload_tuple_dir dir)
+ {
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+- struct dst_entry *dst = route->tuple[dir].dst;
++ struct dst_entry *dst = nft_route_dst_fetch(route, dir);
+ int i, j = 0;
+
+ switch (flow_tuple->l3proto) {
+@@ -122,6 +132,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
+ ETH_ALEN);
+ flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
+ flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
++ dst_release(dst);
+ break;
+ case FLOW_OFFLOAD_XMIT_XFRM:
+ case FLOW_OFFLOAD_XMIT_NEIGH:
+@@ -146,7 +157,7 @@ static void nft_flow_dst_release(struct flow_offload *flow,
+ }
+
+ void flow_offload_route_init(struct flow_offload *flow,
+- const struct nf_flow_route *route)
++ struct nf_flow_route *route)
+ {
+ flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+ flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index cb7d42a3faab3..8808d78d65235 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -686,15 +686,16 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
+ return err;
+ }
+
+-static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+- struct nft_flowtable *flowtable)
++static struct nft_trans *
++nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
++ struct nft_flowtable *flowtable)
+ {
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type,
+ sizeof(struct nft_trans_flowtable));
+ if (trans == NULL)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ if (msg_type == NFT_MSG_NEWFLOWTABLE)
+ nft_activate_next(ctx->net, flowtable);
+@@ -703,22 +704,22 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+ nft_trans_flowtable(trans) = flowtable;
+ nft_trans_commit_list_add_tail(ctx->net, trans);
+
+- return 0;
++ return trans;
+ }
+
+ static int nft_delflowtable(struct nft_ctx *ctx,
+ struct nft_flowtable *flowtable)
+ {
+- int err;
++ struct nft_trans *trans;
+
+- err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+- if (err < 0)
+- return err;
++ trans = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
++ if (IS_ERR(trans))
++ return PTR_ERR(trans);
+
+ nft_deactivate_next(ctx->net, flowtable);
+ nft_use_dec(&ctx->table->use);
+
+- return err;
++ return 0;
+ }
+
+ static void __nft_reg_track_clobber(struct nft_regs_track *track, u8 dreg)
+@@ -1253,6 +1254,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
+ return 0;
+
+ err_register_hooks:
++ ctx->table->flags |= NFT_TABLE_F_DORMANT;
+ nft_trans_destroy(trans);
+ return ret;
+ }
+@@ -2082,7 +2084,7 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net,
+ struct nft_hook *hook;
+ int err;
+
+- hook = kmalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
++ hook = kzalloc(sizeof(struct nft_hook), GFP_KERNEL_ACCOUNT);
+ if (!hook) {
+ err = -ENOMEM;
+ goto err_hook_alloc;
+@@ -2505,19 +2507,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ RCU_INIT_POINTER(chain->blob_gen_0, blob);
+ RCU_INIT_POINTER(chain->blob_gen_1, blob);
+
+- err = nf_tables_register_hook(net, table, chain);
+- if (err < 0)
+- goto err_destroy_chain;
+-
+ if (!nft_use_inc(&table->use)) {
+ err = -EMFILE;
+- goto err_use;
++ goto err_destroy_chain;
+ }
+
+ trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+- goto err_unregister_hook;
++ goto err_trans;
+ }
+
+ nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
+@@ -2525,17 +2523,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
+ nft_trans_chain_policy(trans) = policy;
+
+ err = nft_chain_add(table, chain);
+- if (err < 0) {
+- nft_trans_destroy(trans);
+- goto err_unregister_hook;
+- }
++ if (err < 0)
++ goto err_chain_add;
++
++ /* This must be LAST to ensure no packets are walking over this chain. */
++ err = nf_tables_register_hook(net, table, chain);
++ if (err < 0)
++ goto err_register_hook;
+
+ return 0;
+
+-err_unregister_hook:
++err_register_hook:
++ nft_chain_del(chain);
++err_chain_add:
++ nft_trans_destroy(trans);
++err_trans:
+ nft_use_dec_restore(&table->use);
+-err_use:
+- nf_tables_unregister_hook(net, table, chain);
+ err_destroy_chain:
+ nf_tables_chain_destroy(ctx);
+
+@@ -8333,9 +8336,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ u8 family = info->nfmsg->nfgen_family;
+ const struct nf_flowtable_type *type;
+ struct nft_flowtable *flowtable;
+- struct nft_hook *hook, *next;
+ struct net *net = info->net;
+ struct nft_table *table;
++ struct nft_trans *trans;
+ struct nft_ctx ctx;
+ int err;
+
+@@ -8415,34 +8418,34 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
+ extack, true);
+ if (err < 0)
+- goto err4;
++ goto err_flowtable_parse_hooks;
+
+ list_splice(&flowtable_hook.list, &flowtable->hook_list);
+ flowtable->data.priority = flowtable_hook.priority;
+ flowtable->hooknum = flowtable_hook.num;
+
++ trans = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
++ if (IS_ERR(trans)) {
++ err = PTR_ERR(trans);
++ goto err_flowtable_trans;
++ }
++
++ /* This must be LAST to ensure no packets are walking over this flowtable. */
+ err = nft_register_flowtable_net_hooks(ctx.net, table,
+ &flowtable->hook_list,
+ flowtable);
+- if (err < 0) {
+- nft_hooks_destroy(&flowtable->hook_list);
+- goto err4;
+- }
+-
+- err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+ if (err < 0)
+- goto err5;
++ goto err_flowtable_hooks;
+
+ list_add_tail_rcu(&flowtable->list, &table->flowtables);
+
+ return 0;
+-err5:
+- list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+- nft_unregister_flowtable_hook(net, flowtable, hook);
+- list_del_rcu(&hook->list);
+- kfree_rcu(hook, rcu);
+- }
+-err4:
++
++err_flowtable_hooks:
++ nft_trans_destroy(trans);
++err_flowtable_trans:
++ nft_hooks_destroy(&flowtable->hook_list);
++err_flowtable_parse_hooks:
+ flowtable->data.type->free(&flowtable->data);
+ err3:
+ module_put(type->owner);
+diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
+index 3aa50dc7535b7..976fe250b5095 100644
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -34,10 +34,10 @@ static int pn_ioctl(struct sock *sk, int cmd, int *karg)
+
+ switch (cmd) {
+ case SIOCINQ:
+- lock_sock(sk);
++ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ *karg = skb ? skb->len : 0;
+- release_sock(sk);
++ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ return 0;
+
+ case SIOCPNADDRESOURCE:
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index faba31f2eff29..3dd5f52bc1b58 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -917,6 +917,37 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+ return 0;
+ }
+
++static unsigned int pep_first_packet_length(struct sock *sk)
++{
++ struct pep_sock *pn = pep_sk(sk);
++ struct sk_buff_head *q;
++ struct sk_buff *skb;
++ unsigned int len = 0;
++ bool found = false;
++
++ if (sock_flag(sk, SOCK_URGINLINE)) {
++ q = &pn->ctrlreq_queue;
++ spin_lock_bh(&q->lock);
++ skb = skb_peek(q);
++ if (skb) {
++ len = skb->len;
++ found = true;
++ }
++ spin_unlock_bh(&q->lock);
++ }
++
++ if (likely(!found)) {
++ q = &sk->sk_receive_queue;
++ spin_lock_bh(&q->lock);
++ skb = skb_peek(q);
++ if (skb)
++ len = skb->len;
++ spin_unlock_bh(&q->lock);
++ }
++
++ return len;
++}
++
+ static int pep_ioctl(struct sock *sk, int cmd, int *karg)
+ {
+ struct pep_sock *pn = pep_sk(sk);
+@@ -929,15 +960,7 @@ static int pep_ioctl(struct sock *sk, int cmd, int *karg)
+ break;
+ }
+
+- lock_sock(sk);
+- if (sock_flag(sk, SOCK_URGINLINE) &&
+- !skb_queue_empty(&pn->ctrlreq_queue))
+- *karg = skb_peek(&pn->ctrlreq_queue)->len;
+- else if (!skb_queue_empty(&sk->sk_receive_queue))
+- *karg = skb_peek(&sk->sk_receive_queue)->len;
+- else
+- *karg = 0;
+- release_sock(sk);
++ *karg = pep_first_packet_length(sk);
+ ret = 0;
+ break;
+
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 0a711c184c29b..674f7ae356ca2 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -206,18 +206,14 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
+ return err;
+ }
+
+-static bool is_mirred_nested(void)
+-{
+- return unlikely(__this_cpu_read(mirred_nest_level) > 1);
+-}
+-
+-static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
++static int
++tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
+ {
+ int err;
+
+ if (!want_ingress)
+ err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
+- else if (is_mirred_nested())
++ else if (!at_ingress)
+ err = netif_rx(skb);
+ else
+ err = netif_receive_skb(skb);
+@@ -225,110 +221,123 @@ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+ return err;
+ }
+
+-TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+- const struct tc_action *a,
+- struct tcf_result *res)
++static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
++ struct net_device *dev,
++ const bool m_mac_header_xmit, int m_eaction,
++ int retval)
+ {
+- struct tcf_mirred *m = to_mirred(a);
+- struct sk_buff *skb2 = skb;
+- bool m_mac_header_xmit;
+- struct net_device *dev;
+- unsigned int nest_level;
+- int retval, err = 0;
+- bool use_reinsert;
++ struct sk_buff *skb_to_send = skb;
+ bool want_ingress;
+ bool is_redirect;
+ bool expects_nh;
+ bool at_ingress;
+- int m_eaction;
++ bool dont_clone;
+ int mac_len;
+ bool at_nh;
++ int err;
+
+- nest_level = __this_cpu_inc_return(mirred_nest_level);
+- if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+- net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+- netdev_name(skb->dev));
+- __this_cpu_dec(mirred_nest_level);
+- return TC_ACT_SHOT;
+- }
+-
+- tcf_lastuse_update(&m->tcf_tm);
+- tcf_action_update_bstats(&m->common, skb);
+-
+- m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
+- m_eaction = READ_ONCE(m->tcfm_eaction);
+- retval = READ_ONCE(m->tcf_action);
+- dev = rcu_dereference_bh(m->tcfm_dev);
+- if (unlikely(!dev)) {
+- pr_notice_once("tc mirred: target device is gone\n");
+- goto out;
+- }
+-
++ is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
+ net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ dev->name);
+- goto out;
++ goto err_cant_do;
+ }
+
+ /* we could easily avoid the clone only if called by ingress and clsact;
+ * since we can't easily detect the clsact caller, skip clone only for
+ * ingress - that covers the TC S/W datapath.
+ */
+- is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+ at_ingress = skb_at_tc_ingress(skb);
+- use_reinsert = at_ingress && is_redirect &&
+- tcf_mirred_can_reinsert(retval);
+- if (!use_reinsert) {
+- skb2 = skb_clone(skb, GFP_ATOMIC);
+- if (!skb2)
+- goto out;
++ dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
++ tcf_mirred_can_reinsert(retval);
++ if (!dont_clone) {
++ skb_to_send = skb_clone(skb, GFP_ATOMIC);
++ if (!skb_to_send)
++ goto err_cant_do;
+ }
+
+ want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+
+ /* All mirred/redirected skbs should clear previous ct info */
+- nf_reset_ct(skb2);
++ nf_reset_ct(skb_to_send);
+ if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
+- skb_dst_drop(skb2);
++ skb_dst_drop(skb_to_send);
+
+ expects_nh = want_ingress || !m_mac_header_xmit;
+ at_nh = skb->data == skb_network_header(skb);
+ if (at_nh != expects_nh) {
+- mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
++ mac_len = at_ingress ? skb->mac_len :
+ skb_network_offset(skb);
+ if (expects_nh) {
+ /* target device/action expect data at nh */
+- skb_pull_rcsum(skb2, mac_len);
++ skb_pull_rcsum(skb_to_send, mac_len);
+ } else {
+ /* target device/action expect data at mac */
+- skb_push_rcsum(skb2, mac_len);
++ skb_push_rcsum(skb_to_send, mac_len);
+ }
+ }
+
+- skb2->skb_iif = skb->dev->ifindex;
+- skb2->dev = dev;
++ skb_to_send->skb_iif = skb->dev->ifindex;
++ skb_to_send->dev = dev;
+
+- /* mirror is always swallowed */
+ if (is_redirect) {
+- skb_set_redirected(skb2, skb2->tc_at_ingress);
+-
+- /* let's the caller reinsert the packet, if possible */
+- if (use_reinsert) {
+- err = tcf_mirred_forward(want_ingress, skb);
+- if (err)
+- tcf_action_inc_overlimit_qstats(&m->common);
+- __this_cpu_dec(mirred_nest_level);
+- return TC_ACT_CONSUMED;
+- }
++ if (skb == skb_to_send)
++ retval = TC_ACT_CONSUMED;
++
++ skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
++
++ err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
++ } else {
++ err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
+ }
++ if (err)
++ tcf_action_inc_overlimit_qstats(&m->common);
++
++ return retval;
++
++err_cant_do:
++ if (is_redirect)
++ retval = TC_ACT_SHOT;
++ tcf_action_inc_overlimit_qstats(&m->common);
++ return retval;
++}
++
++TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
++ const struct tc_action *a,
++ struct tcf_result *res)
++{
++ struct tcf_mirred *m = to_mirred(a);
++ int retval = READ_ONCE(m->tcf_action);
++ unsigned int nest_level;
++ bool m_mac_header_xmit;
++ struct net_device *dev;
++ int m_eaction;
+
+- err = tcf_mirred_forward(want_ingress, skb2);
+- if (err) {
+-out:
++ nest_level = __this_cpu_inc_return(mirred_nest_level);
++ if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
++ net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
++ netdev_name(skb->dev));
++ retval = TC_ACT_SHOT;
++ goto dec_nest_level;
++ }
++
++ tcf_lastuse_update(&m->tcf_tm);
++ tcf_action_update_bstats(&m->common, skb);
++
++ dev = rcu_dereference_bh(m->tcfm_dev);
++ if (unlikely(!dev)) {
++ pr_notice_once("tc mirred: target device is gone\n");
+ tcf_action_inc_overlimit_qstats(&m->common);
+- if (tcf_mirred_is_act_redirect(m_eaction))
+- retval = TC_ACT_SHOT;
++ goto dec_nest_level;
+ }
++
++ m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
++ m_eaction = READ_ONCE(m->tcfm_eaction);
++
++ retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
++ retval);
++
++dec_nest_level:
+ __this_cpu_dec(mirred_nest_level);
+
+ return retval;
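The rework keeps the per-CPU mirred_nest_level recursion guard in tcf_mirred_act() and moves the forwarding work into tcf_mirred_to_dev(), so the counter is now decremented on every exit path. A kernel-context sketch of that guard pattern (an illustration, not from this patch; MY_NEST_LIMIT and do_forward() are hypothetical stand-ins):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

#define MY_NEST_LIMIT	4

static DEFINE_PER_CPU(unsigned int, my_nest_level);

static int do_forward(struct sk_buff *skb)
{
	return 0;	/* placeholder for the real forwarding step */
}

static int guarded_forward(struct sk_buff *skb)
{
	unsigned int level = __this_cpu_inc_return(my_nest_level);
	int ret = -ELOOP;

	if (likely(level <= MY_NEST_LIMIT))
		ret = do_forward(skb);

	__this_cpu_dec(my_nest_level);	/* undone on every path, success or not */
	return ret;
}
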
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index efb9d2811b73d..6ee7064c82fcc 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2460,8 +2460,11 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ }
+
+ errout_idr:
+- if (!fold)
++ if (!fold) {
++ spin_lock(&tp->lock);
+ idr_remove(&head->handle_idr, fnew->handle);
++ spin_unlock(&tp->lock);
++ }
+ __fl_put(fnew);
+ errout_tb:
+ kfree(tb);
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 5b045284849e0..c9189a970eec3 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -19,6 +19,35 @@
+ #include <linux/rtnetlink.h>
+ #include <net/switchdev.h>
+
++static bool switchdev_obj_eq(const struct switchdev_obj *a,
++ const struct switchdev_obj *b)
++{
++ const struct switchdev_obj_port_vlan *va, *vb;
++ const struct switchdev_obj_port_mdb *ma, *mb;
++
++ if (a->id != b->id || a->orig_dev != b->orig_dev)
++ return false;
++
++ switch (a->id) {
++ case SWITCHDEV_OBJ_ID_PORT_VLAN:
++ va = SWITCHDEV_OBJ_PORT_VLAN(a);
++ vb = SWITCHDEV_OBJ_PORT_VLAN(b);
++ return va->flags == vb->flags &&
++ va->vid == vb->vid &&
++ va->changed == vb->changed;
++ case SWITCHDEV_OBJ_ID_PORT_MDB:
++ case SWITCHDEV_OBJ_ID_HOST_MDB:
++ ma = SWITCHDEV_OBJ_PORT_MDB(a);
++ mb = SWITCHDEV_OBJ_PORT_MDB(b);
++ return ma->vid == mb->vid &&
++ ether_addr_equal(ma->addr, mb->addr);
++ default:
++ break;
++ }
++
++ BUG();
++}
++
+ static LIST_HEAD(deferred);
+ static DEFINE_SPINLOCK(deferred_lock);
+
+@@ -307,6 +336,50 @@ int switchdev_port_obj_del(struct net_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+
++/**
++ * switchdev_port_obj_act_is_deferred - Is object action pending?
++ *
++ * @dev: port device
++ * @nt: type of action; add or delete
++ * @obj: object to test
++ *
++ * Returns true if a deferred item is pending, which is
++ * equivalent to the action @nt on an object @obj.
++ *
++ * rtnl_lock must be held.
++ */
++bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
++ enum switchdev_notifier_type nt,
++ const struct switchdev_obj *obj)
++{
++ struct switchdev_deferred_item *dfitem;
++ bool found = false;
++
++ ASSERT_RTNL();
++
++ spin_lock_bh(&deferred_lock);
++
++ list_for_each_entry(dfitem, &deferred, list) {
++ if (dfitem->dev != dev)
++ continue;
++
++ if ((dfitem->func == switchdev_port_obj_add_deferred &&
++ nt == SWITCHDEV_PORT_OBJ_ADD) ||
++ (dfitem->func == switchdev_port_obj_del_deferred &&
++ nt == SWITCHDEV_PORT_OBJ_DEL)) {
++ if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
++ found = true;
++ break;
++ }
++ }
++ }
++
++ spin_unlock_bh(&deferred_lock);
++
++ return found;
++}
++EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
++
+ static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+ static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
+
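switchdev_port_obj_act_is_deferred() lets a caller ask, under rtnl_lock, whether an equivalent add or delete of the same object is already sitting on the deferred list, using switchdev_obj_eq() to compare VLAN and MDB objects field by field. A hypothetical caller sketch for a VLAN object (an illustration, not from this patch):

#include <linux/rtnetlink.h>
#include <net/switchdev.h>

/* Would a queued deletion cancel out re-adding this VLAN? */
static bool vlan_del_is_pending(struct net_device *dev,
				const struct switchdev_obj_port_vlan *vlan)
{
	ASSERT_RTNL();

	return switchdev_port_obj_act_is_deferred(dev, SWITCHDEV_PORT_OBJ_DEL,
						   &vlan->obj);
}
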
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 002483e60c190..e97fcb502115e 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -1001,7 +1001,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx)
+ return 0;
+ }
+
+-static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
++static int tls_get_info(struct sock *sk, struct sk_buff *skb)
+ {
+ u16 version, cipher_type;
+ struct tls_context *ctx;
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index e1f8ff6e9a739..5238886e61860 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1772,7 +1772,8 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+ u8 *control,
+ size_t skip,
+ size_t len,
+- bool is_peek)
++ bool is_peek,
++ bool *more)
+ {
+ struct sk_buff *skb = skb_peek(&ctx->rx_list);
+ struct tls_msg *tlm;
+@@ -1785,7 +1786,7 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+- goto out;
++ goto more;
+
+ if (skip < rxm->full_len)
+ break;
+@@ -1803,12 +1804,12 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+
+ err = tls_record_content_type(msg, tlm, control);
+ if (err <= 0)
+- goto out;
++ goto more;
+
+ err = skb_copy_datagram_msg(skb, rxm->offset + skip,
+ msg, chunk);
+ if (err < 0)
+- goto out;
++ goto more;
+
+ len = len - chunk;
+ copied = copied + chunk;
+@@ -1844,6 +1845,10 @@ static int process_rx_list(struct tls_sw_context_rx *ctx,
+
+ out:
+ return copied ? : err;
++more:
++ if (more)
++ *more = true;
++ goto out;
+ }
+
+ static bool
+@@ -1947,6 +1952,7 @@ int tls_sw_recvmsg(struct sock *sk,
+ int target, err;
+ bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
+ bool is_peek = flags & MSG_PEEK;
++ bool rx_more = false;
+ bool released = true;
+ bool bpf_strp_enabled;
+ bool zc_capable;
+@@ -1966,12 +1972,12 @@ int tls_sw_recvmsg(struct sock *sk,
+ goto end;
+
+ /* Process pending decrypted records. It must be non-zero-copy */
+- err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
++ err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
+ if (err < 0)
+ goto end;
+
+ copied = err;
+- if (len <= copied)
++ if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
+ goto end;
+
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+@@ -2064,6 +2070,8 @@ int tls_sw_recvmsg(struct sock *sk,
+ decrypted += chunk;
+ len -= chunk;
+ __skb_queue_tail(&ctx->rx_list, skb);
++ if (unlikely(control != TLS_RECORD_TYPE_DATA))
++ break;
+ continue;
+ }
+
+@@ -2128,10 +2136,10 @@ int tls_sw_recvmsg(struct sock *sk,
+ /* Drain records from the rx_list & copy if required */
+ if (is_peek || is_kvec)
+ err = process_rx_list(ctx, msg, &control, copied,
+- decrypted, is_peek);
++ decrypted, is_peek, NULL);
+ else
+ err = process_rx_list(ctx, msg, &control, 0,
+- async_copy_bytes, is_peek);
++ async_copy_bytes, is_peek, NULL);
+ }
+
+ copied += decrypted;
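With the new rx_more flag and the early break on non-data records, tls_sw_recvmsg() now stops at a control-record boundary instead of merging different record types into one return. A hedged userspace sketch of how a kTLS reader picks the record type out of the TLS_GET_RECORD_TYPE cmsg on each call (an illustration, not from this patch; the SOL_TLS fallback define is only for older libcs):

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static ssize_t tls_recv(int fd, void *buf, size_t len, unsigned char *rec_type)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	*rec_type = 23;		/* TLS application_data unless a cmsg says otherwise */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_TLS &&
		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
			*rec_type = *CMSG_DATA(cmsg);
	return n;
}
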
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 0b0dfecedc50a..c8bfacd5c8f3d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4012,6 +4012,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
+ if_idx++;
+ }
+
++ if_start = 0;
+ wp_idx++;
+ }
+ out:
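The one-line "if_start = 0;" resets the interface cursor whenever the dump advances to the next wiphy; without it, after a partial dump every later wiphy would skip its first interfaces. A self-contained stand-alone analogue of this resumable nested-iteration pattern (an illustration, not from this patch; the dump's cb->args state is modelled with two plain integers and an artificial per-pass budget):

#include <stdio.h>

#define N_PHY 3
#define N_IF  4

int main(void)
{
	int wp_start = 0, if_start = 0;	/* resume state across "dump" passes */
	int budget = 5;			/* pretend the buffer fills after 5 entries */

	for (int pass = 0; pass < 3; pass++) {
		int emitted = 0;

		for (int wp = wp_start; wp < N_PHY; wp++) {
			for (int i = if_start; i < N_IF; i++) {
				if (emitted == budget) {
					wp_start = wp;	/* remember where to resume */
					if_start = i;
					goto next_pass;
				}
				printf("pass %d: phy%d if%d\n", pass, wp, i);
				emitted++;
			}
			if_start = 0;	/* the fix: next phy starts at its first interface */
		}
		break;
next_pass:	;
	}
	return 0;
}
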
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index d849dc04a3343..2c3ba42bfcdcb 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -683,7 +683,8 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ memcpy(vaddr, buffer, len);
+ kunmap_local(vaddr);
+
+- skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
++ skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
++ refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
+ }
+ }
+
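The last argument of skb_add_rx_frag() is the fragment's truesize; because the whole page stays pinned here, PAGE_SIZE is recorded as truesize and the same amount is charged to the socket's sk_wmem_alloc, so the later uncharge by the skb destructor balances. A kernel-context sketch of keeping those two figures in step (an illustration, not from this patch):

#include <linux/skbuff.h>
#include <net/sock.h>

static void attach_page_frag(struct sk_buff *skb, struct sock *sk,
			     struct page *page, unsigned int len)
{
	/* The full page is held, so account a full page of truesize... */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len, PAGE_SIZE);

	/* ...and charge the owning socket the matching amount up front. */
	refcount_add(PAGE_SIZE, &sk->sk_wmem_alloc);
}
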
+diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
+index 61b7dddedc461..0669bac5e900e 100755
+--- a/scripts/bpf_doc.py
++++ b/scripts/bpf_doc.py
+@@ -513,7 +513,7 @@ eBPF programs can have an associated license, passed along with the bytecode
+ instructions to the kernel when the programs are loaded. The format for that
+ string is identical to the one in use for kernel modules (Dual licenses, such
+ as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
+-programs that are compatible with the GNU Privacy License (GPL).
++programs that are compatible with the GNU General Public License (GNU GPL).
+
+ In order to use such helpers, the eBPF program must be loaded with the correct
+ license string passed (via **attr**) to the **bpf**\\ () system call, and this
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 55d3a78112e05..a6a9d353fe635 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1735,9 +1735,11 @@ static int default_bdl_pos_adj(struct azx *chip)
+ /* some exceptions: Atoms seem problematic with value 1 */
+ if (chip->pci->vendor == PCI_VENDOR_ID_INTEL) {
+ switch (chip->pci->device) {
+- case 0x0f04: /* Baytrail */
+- case 0x2284: /* Braswell */
++ case PCI_DEVICE_ID_INTEL_HDA_BYT:
++ case PCI_DEVICE_ID_INTEL_HDA_BSW:
+ return 32;
++ case PCI_DEVICE_ID_INTEL_HDA_APL:
++ return 64;
+ }
+ }
+
+diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
+index a06af82b80565..fc4e91535578b 100644
+--- a/sound/soc/amd/acp/acp-mach-common.c
++++ b/sound/soc/amd/acp/acp-mach-common.c
+@@ -1416,8 +1416,13 @@ int acp_sofdsp_dai_links_create(struct snd_soc_card *card)
+ if (drv_data->amp_cpu_id == I2S_SP) {
+ links[i].name = "acp-amp-codec";
+ links[i].id = AMP_BE_ID;
+- links[i].cpus = sof_sp_virtual;
+- links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
++ if (drv_data->platform == RENOIR) {
++ links[i].cpus = sof_sp;
++ links[i].num_cpus = ARRAY_SIZE(sof_sp);
++ } else {
++ links[i].cpus = sof_sp_virtual;
++ links[i].num_cpus = ARRAY_SIZE(sof_sp_virtual);
++ }
+ links[i].platforms = sof_component;
+ links[i].num_platforms = ARRAY_SIZE(sof_component);
+ links[i].dpcm_playback = 1;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index cb654f1b09f10..72b90a7ee4b68 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -739,19 +739,25 @@ static int wm_adsp_request_firmware_file(struct wm_adsp *dsp,
+ const char *filetype)
+ {
+ struct cs_dsp *cs_dsp = &dsp->cs_dsp;
++ const char *fwf;
+ char *s, c;
+ int ret = 0;
+
++ if (dsp->fwf_name)
++ fwf = dsp->fwf_name;
++ else
++ fwf = dsp->cs_dsp.name;
++
+ if (system_name && asoc_component_prefix)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s-%s.%s", dir, dsp->part,
+- dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
++ fwf, wm_adsp_fw[dsp->fw].file, system_name,
+ asoc_component_prefix, filetype);
+ else if (system_name)
+ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s-%s.%s", dir, dsp->part,
+- dsp->fwf_name, wm_adsp_fw[dsp->fw].file, system_name,
++ fwf, wm_adsp_fw[dsp->fw].file, system_name,
+ filetype);
+ else
+- *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, dsp->fwf_name,
++ *filename = kasprintf(GFP_KERNEL, "%s%s-%s-%s.%s", dir, dsp->part, fwf,
+ wm_adsp_fw[dsp->fw].file, filetype);
+
+ if (*filename == NULL)
+@@ -863,29 +869,18 @@ static int wm_adsp_request_firmware_files(struct wm_adsp *dsp,
+ }
+
+ adsp_err(dsp, "Failed to request firmware <%s>%s-%s-%s<-%s<%s>>.wmfw\n",
+- cirrus_dir, dsp->part, dsp->fwf_name, wm_adsp_fw[dsp->fw].file,
+- system_name, asoc_component_prefix);
++ cirrus_dir, dsp->part,
++ dsp->fwf_name ? dsp->fwf_name : dsp->cs_dsp.name,
++ wm_adsp_fw[dsp->fw].file, system_name, asoc_component_prefix);
+
+ return -ENOENT;
+ }
+
+ static int wm_adsp_common_init(struct wm_adsp *dsp)
+ {
+- char *p;
+-
+ INIT_LIST_HEAD(&dsp->compr_list);
+ INIT_LIST_HEAD(&dsp->buffer_list);
+
+- if (!dsp->fwf_name) {
+- p = devm_kstrdup(dsp->cs_dsp.dev, dsp->cs_dsp.name, GFP_KERNEL);
+- if (!p)
+- return -ENOMEM;
+-
+- dsp->fwf_name = p;
+- for (; *p != 0; ++p)
+- *p = tolower(*p);
+- }
+-
+ return 0;
+ }
+
+diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
+index b849bb7cf58e2..2347aeb049bcc 100644
+--- a/sound/soc/sunxi/sun4i-spdif.c
++++ b/sound/soc/sunxi/sun4i-spdif.c
+@@ -578,6 +578,11 @@ static const struct of_device_id sun4i_spdif_of_match[] = {
+ .compatible = "allwinner,sun50i-h6-spdif",
+ .data = &sun50i_h6_spdif_quirks,
+ },
++ {
++ .compatible = "allwinner,sun50i-h616-spdif",
++ /* Essentially the same as the H6, but without RX */
++ .data = &sun50i_h6_spdif_quirks,
++ },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 33db334e65566..a676ad093d189 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -328,8 +328,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR)
+ return ret;
+ err = uac_clock_selector_set_val(chip, entity_id, cur);
+- if (err < 0)
++ if (err < 0) {
++ if (pins == 1) {
++ usb_audio_dbg(chip,
++ "%s(): selector returned an error, "
++ "assuming a firmware bug, id %d, ret %d\n",
++ __func__, clock_id, err);
++ return ret;
++ }
+ return err;
++ }
+ }
+
+ if (!validate || ret > 0 || !chip->autoclock)
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index ab5fed9f55b60..3b45d0ee76938 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -470,9 +470,11 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
+ int clock)
+ {
+ struct usb_device *dev = chip->dev;
++ struct usb_host_interface *alts;
+ unsigned int *table;
+ unsigned int nr_rates;
+ int i, err;
++ u32 bmControls;
+
+ /* performing the rate verification may lead to unexpected USB bus
+ * behavior afterwards by some unknown reason. Do this only for the
+@@ -481,6 +483,24 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
+ if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
+ return 0; /* don't perform the validation as default */
+
++ alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
++ if (!alts)
++ return 0;
++
++ if (fp->protocol == UAC_VERSION_3) {
++ struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
++ alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
++ bmControls = le32_to_cpu(as->bmControls);
++ } else {
++ struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
++ alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
++ bmControls = as->bmControls;
++ }
++
++ if (!uac_v2v3_control_is_readable(bmControls,
++ UAC2_AS_VAL_ALT_SETTINGS))
++ return 0;
++
+ table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index ada7acb91a1b7..366df8a1a5fc8 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -3257,6 +3257,11 @@ union bpf_attr {
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
++ * **BPF_FIB_LOOKUP_SRC**
++ * Derive and set source IP addr in *params*->ipv{4,6}_src
++ * for the nexthop. If the src addr cannot be derived,
++ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
++ * case, *params*->dmac and *params*->smac are not set either.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+@@ -6956,6 +6961,7 @@ enum {
+ BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
++ BPF_FIB_LOOKUP_SRC = (1U << 4),
+ };
+
+ enum {
+@@ -6968,6 +6974,7 @@ enum {
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
++ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
+ };
+
+ struct bpf_fib_lookup {
+@@ -7002,6 +7009,9 @@ struct bpf_fib_lookup {
+ __u32 rt_metric;
+ };
+
++ /* input: source address to consider for lookup
++ * output: source address result from lookup
++ */
+ union {
+ __be32 ipv4_src;
+ __u32 ipv6_src[4]; /* in6_addr; network order */
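BPF_FIB_LOOKUP_SRC asks bpf_fib_lookup() to also derive a source address into params->ipv{4,6}_src, returning BPF_FIB_LKUP_RET_NO_SRC_ADDR when none can be found. A hedged XDP sketch using the new flag (an illustration, not from this patch; it assumes UAPI headers that already carry the flag, and the literal 2 stands for AF_INET):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int fib_src_example(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};
	long rc;

	params.family   = 2;				/* AF_INET */
	params.ifindex  = ctx->ingress_ifindex;
	params.ipv4_dst = bpf_htonl(0x08080808);	/* example destination */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params),
			    BPF_FIB_LOOKUP_SRC | BPF_FIB_LOOKUP_SKIP_NEIGH);
	if (rc == BPF_FIB_LKUP_RET_NO_SRC_ADDR)
		return XDP_PASS;	/* no usable source address for this nexthop */

	/* On success params.ipv4_src now holds the derived source address. */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
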
+diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
+index 514e0d69e7310..11a7a889d279c 100644
+--- a/tools/net/ynl/lib/ynl.c
++++ b/tools/net/ynl/lib/ynl.c
+@@ -450,6 +450,8 @@ ynl_gemsg_start_dump(struct ynl_sock *ys, __u32 id, __u8 cmd, __u8 version)
+
+ int ynl_recv_ack(struct ynl_sock *ys, int ret)
+ {
++ struct ynl_parse_arg yarg = { .ys = ys, };
++
+ if (!ret) {
+ yerr(ys, YNL_ERROR_EXPECT_ACK,
+ "Expecting an ACK but nothing received");
+@@ -462,7 +464,7 @@ int ynl_recv_ack(struct ynl_sock *ys, int ret)
+ return ret;
+ }
+ return mnl_cb_run(ys->rx_buf, ret, ys->seq, ys->portid,
+- ynl_cb_null, ys);
++ ynl_cb_null, &yarg);
+ }
+
+ int ynl_cb_null(const struct nlmsghdr *nlh, void *data)
+@@ -570,7 +572,13 @@ static int ynl_sock_read_family(struct ynl_sock *ys, const char *family_name)
+ return err;
+ }
+
+- return ynl_recv_ack(ys, err);
++ err = ynl_recv_ack(ys, err);
++ if (err < 0) {
++ free(ys->mcast_groups);
++ return err;
++ }
++
++ return 0;
+ }
+
+ struct ynl_sock *
+@@ -725,11 +733,14 @@ static int ynl_ntf_parse(struct ynl_sock *ys, const struct nlmsghdr *nlh)
+
+ static int ynl_ntf_trampoline(const struct nlmsghdr *nlh, void *data)
+ {
+- return ynl_ntf_parse((struct ynl_sock *)data, nlh);
++ struct ynl_parse_arg *yarg = data;
++
++ return ynl_ntf_parse(yarg->ys, nlh);
+ }
+
+ int ynl_ntf_check(struct ynl_sock *ys)
+ {
++ struct ynl_parse_arg yarg = { .ys = ys, };
+ ssize_t len;
+ int err;
+
+@@ -751,7 +762,7 @@ int ynl_ntf_check(struct ynl_sock *ys)
+ return len;
+
+ err = mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,
+- ynl_ntf_trampoline, ys,
++ ynl_ntf_trampoline, &yarg,
+ ynl_cb_array, NLMSG_MIN_TYPE);
+ if (err < 0)
+ return err;
+diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+index d508486cc0bdc..9a3d3c389dadd 100755
+--- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh
++++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh
+@@ -62,6 +62,8 @@ prio_test()
+
+ # create bond
+ bond_reset "${param}"
++ # set active_slave to primary eth1 specifically
++ ip -n ${s_ns} link set bond0 type bond active_slave eth1
+
+ # check bonding member prio value
+ ip -n ${s_ns} link set eth0 type bond_slave prio 0
+diff --git a/tools/testing/selftests/iommu/config b/tools/testing/selftests/iommu/config
+index 6c4f901d6fed3..110d73917615d 100644
+--- a/tools/testing/selftests/iommu/config
++++ b/tools/testing/selftests/iommu/config
+@@ -1,2 +1,3 @@
+-CONFIG_IOMMUFD
+-CONFIG_IOMMUFD_TEST
++CONFIG_IOMMUFD=y
++CONFIG_FAULT_INJECTION=y
++CONFIG_IOMMUFD_TEST=y
+diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
+index 2709a34a39c52..2c68709062da5 100644
+--- a/tools/testing/selftests/mm/uffd-unit-tests.c
++++ b/tools/testing/selftests/mm/uffd-unit-tests.c
+@@ -1309,6 +1309,12 @@ int main(int argc, char *argv[])
+ continue;
+
+ uffd_test_start("%s on %s", test->name, mem_type->name);
++ if ((mem_type->mem_flag == MEM_HUGETLB ||
++ mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
++ (default_huge_page_size() == 0)) {
++ uffd_test_skip("huge page size is 0, feature missing?");
++ continue;
++ }
+ if (!uffd_feature_supported(test)) {
+ uffd_test_skip("feature missing");
+ continue;
+diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
+index b0f5e55d2d0b2..5896296365022 100755
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -235,9 +235,6 @@ mirred_egress_to_ingress_tcp_test()
+ check_err $? "didn't mirred redirect ICMP"
+ tc_check_packets "dev $h1 ingress" 102 10
+ check_err $? "didn't drop mirred ICMP"
+- local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
+- test ${overlimits} = 10
+- check_err $? "wrong overlimits, expected 10 got ${overlimits}"
+
+ tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
+ tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
+diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
+index 85a8ee9395b39..4d8c59be1b30c 100755
+--- a/tools/testing/selftests/net/mptcp/diag.sh
++++ b/tools/testing/selftests/net/mptcp/diag.sh
+@@ -56,14 +56,14 @@ __chk_nr()
+ local command="$1"
+ local expected=$2
+ local msg="$3"
+- local skip="${4:-SKIP}"
++ local skip="${4-SKIP}"
+ local nr
+
+ nr=$(eval $command)
+
+ printf "%-50s" "$msg"
+- if [ $nr != $expected ]; then
+- if [ $nr = "$skip" ] && ! mptcp_lib_expect_all_features; then
++ if [ "$nr" != "$expected" ]; then
++ if [ "$nr" = "$skip" ] && ! mptcp_lib_expect_all_features; then
+ echo "[ skip ] Feature probably not supported"
+ mptcp_lib_result_skip "${msg}"
+ else
+@@ -166,9 +166,13 @@ chk_msk_listen()
+ chk_msk_inuse()
+ {
+ local expected=$1
+- local msg="$2"
++ local msg="....chk ${2:-${expected}} msk in use"
+ local listen_nr
+
++ if [ "${expected}" -eq 0 ]; then
++ msg+=" after flush"
++ fi
++
+ listen_nr=$(ss -N "${ns}" -Ml | grep -c LISTEN)
+ expected=$((expected + listen_nr))
+
+@@ -179,7 +183,7 @@ chk_msk_inuse()
+ sleep 0.1
+ done
+
+- __chk_nr get_msk_inuse $expected "$msg" 0
++ __chk_nr get_msk_inuse $expected "${msg}" 0
+ }
+
+ # $1: ns, $2: port
+@@ -199,6 +203,20 @@ wait_local_port_listen()
+ done
+ }
+
++# $1: cestab nr
++chk_msk_cestab()
++{
++ local expected=$1
++ local msg="....chk ${2:-${expected}} cestab"
++
++ if [ "${expected}" -eq 0 ]; then
++ msg+=" after flush"
++ fi
++
++ __chk_nr "mptcp_lib_get_counter ${ns} MPTcpExtMPCurrEstab" \
++ "${expected}" "${msg}" ""
++}
++
+ wait_connected()
+ {
+ local listener_ns="${1}"
+@@ -235,10 +253,12 @@ wait_connected $ns 10000
+ chk_msk_nr 2 "after MPC handshake "
+ chk_msk_remote_key_nr 2 "....chk remote_key"
+ chk_msk_fallback_nr 0 "....chk no fallback"
+-chk_msk_inuse 2 "....chk 2 msk in use"
++chk_msk_inuse 2
++chk_msk_cestab 2
+ flush_pids
+
+-chk_msk_inuse 0 "....chk 0 msk in use after flush"
++chk_msk_inuse 0 "2->0"
++chk_msk_cestab 0 "2->0"
+
+ echo "a" | \
+ timeout ${timeout_test} \
+@@ -253,10 +273,12 @@ echo "b" | \
+ 127.0.0.1 >/dev/null &
+ wait_connected $ns 10001
+ chk_msk_fallback_nr 1 "check fallback"
+-chk_msk_inuse 1 "....chk 1 msk in use"
++chk_msk_inuse 1
++chk_msk_cestab 1
+ flush_pids
+
+-chk_msk_inuse 0 "....chk 0 msk in use after flush"
++chk_msk_inuse 0 "1->0"
++chk_msk_cestab 0 "1->0"
+
+ NR_CLIENTS=100
+ for I in `seq 1 $NR_CLIENTS`; do
+@@ -277,10 +299,12 @@ for I in `seq 1 $NR_CLIENTS`; do
+ done
+
+ wait_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+-chk_msk_inuse $((NR_CLIENTS*2)) "....chk many msk in use"
++chk_msk_inuse $((NR_CLIENTS*2)) "many"
++chk_msk_cestab $((NR_CLIENTS*2)) "many"
+ flush_pids
+
+-chk_msk_inuse 0 "....chk 0 msk in use after flush"
++chk_msk_inuse 0 "many->0"
++chk_msk_cestab 0 "many->0"
+
+ mptcp_lib_result_print_all_tap
+ exit $ret
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+index b1fc8afd072dc..10cd322e05c42 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+@@ -341,21 +341,6 @@ do_ping()
+ return 0
+ }
+
+-# $1: ns, $2: MIB counter
+-get_mib_counter()
+-{
+- local listener_ns="${1}"
+- local mib="${2}"
+-
+- # strip the header
+- ip netns exec "${listener_ns}" \
+- nstat -z -a "${mib}" | \
+- tail -n+2 | \
+- while read a count c rest; do
+- echo $count
+- done
+-}
+-
+ # $1: ns, $2: port
+ wait_local_port_listen()
+ {
+@@ -441,12 +426,12 @@ do_transfer()
+ nstat -n
+ fi
+
+- local stat_synrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+- local stat_ackrx_last_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+- local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
+- local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+- local stat_csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+- local stat_csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
++ local stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
++ local stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
++ local stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
++ local stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
++ local stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
++ local stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+
+ timeout ${timeout_test} \
+ ip netns exec ${listener_ns} \
+@@ -509,11 +494,11 @@ do_transfer()
+ check_transfer $cin $sout "file received by server"
+ rets=$?
+
+- local stat_synrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
+- local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
+- local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
+- local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+- local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
++ local stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX")
++ local stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
++ local stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent")
++ local stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv")
++ local stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue")
+
+ expect_synrx=$((stat_synrx_last_l))
+ expect_ackrx=$((stat_ackrx_last_l))
+@@ -542,8 +527,8 @@ do_transfer()
+ fi
+
+ if $checksum; then
+- local csum_err_s=$(get_mib_counter "${listener_ns}" "MPTcpExtDataCsumErr")
+- local csum_err_c=$(get_mib_counter "${connector_ns}" "MPTcpExtDataCsumErr")
++ local csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr")
++ local csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr")
+
+ local csum_err_s_nr=$((csum_err_s - stat_csum_err_s))
+ if [ $csum_err_s_nr -gt 0 ]; then
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 67ca22856d54a..a72104dae2b9c 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -611,25 +611,9 @@ wait_local_port_listen()
+ done
+ }
+
+-# $1: ns ; $2: counter
+-get_counter()
+-{
+- local ns="${1}"
+- local counter="${2}"
+- local count
+-
+- count=$(ip netns exec ${ns} nstat -asz "${counter}" | awk 'NR==1 {next} {print $2}')
+- if [ -z "${count}" ]; then
+- mptcp_lib_fail_if_expected_feature "${counter} counter"
+- return 1
+- fi
+-
+- echo "${count}"
+-}
+-
+ rm_addr_count()
+ {
+- get_counter "${1}" "MPTcpExtRmAddr"
++ mptcp_lib_get_counter "${1}" "MPTcpExtRmAddr"
+ }
+
+ # $1: ns, $2: old rm_addr counter in $ns
+@@ -649,7 +633,7 @@ wait_rm_addr()
+
+ rm_sf_count()
+ {
+- get_counter "${1}" "MPTcpExtRmSubflow"
++ mptcp_lib_get_counter "${1}" "MPTcpExtRmSubflow"
+ }
+
+ # $1: ns, $2: old rm_sf counter in $ns
+@@ -672,11 +656,11 @@ wait_mpj()
+ local ns="${1}"
+ local cnt old_cnt
+
+- old_cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
++ old_cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
+
+ local i
+ for i in $(seq 10); do
+- cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
++ cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
+ [ "$cnt" = "${old_cnt}" ] || break
+ sleep 0.1
+ done
+@@ -688,13 +672,6 @@ kill_events_pids()
+ mptcp_lib_kill_wait $evts_ns2_pid
+ }
+
+-kill_tests_wait()
+-{
+- #shellcheck disable=SC2046
+- kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
+- wait
+-}
+-
+ pm_nl_set_limits()
+ {
+ local ns=$1
+@@ -1278,7 +1255,7 @@ chk_csum_nr()
+ fi
+
+ print_check "sum"
+- count=$(get_counter ${ns1} "MPTcpExtDataCsumErr")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
+ if [ "$count" != "$csum_ns1" ]; then
+ extra_msg="$extra_msg ns1=$count"
+ fi
+@@ -1291,7 +1268,7 @@ chk_csum_nr()
+ print_ok
+ fi
+ print_check "csum"
+- count=$(get_counter ${ns2} "MPTcpExtDataCsumErr")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
+ if [ "$count" != "$csum_ns2" ]; then
+ extra_msg="$extra_msg ns2=$count"
+ fi
+@@ -1335,7 +1312,7 @@ chk_fail_nr()
+ fi
+
+ print_check "ftx"
+- count=$(get_counter ${ns_tx} "MPTcpExtMPFailTx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
+ if [ "$count" != "$fail_tx" ]; then
+ extra_msg="$extra_msg,tx=$count"
+ fi
+@@ -1349,7 +1326,7 @@ chk_fail_nr()
+ fi
+
+ print_check "failrx"
+- count=$(get_counter ${ns_rx} "MPTcpExtMPFailRx")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
+ if [ "$count" != "$fail_rx" ]; then
+ extra_msg="$extra_msg,rx=$count"
+ fi
+@@ -1382,7 +1359,7 @@ chk_fclose_nr()
+ fi
+
+ print_check "ctx"
+- count=$(get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$fclose_tx" ]; then
+@@ -1393,7 +1370,7 @@ chk_fclose_nr()
+ fi
+
+ print_check "fclzrx"
+- count=$(get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$fclose_rx" ]; then
+@@ -1423,7 +1400,7 @@ chk_rst_nr()
+ fi
+
+ print_check "rtx"
+- count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
++ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPRstTx")
+ if [ -z "$count" ]; then
+ print_skip
+ # accept more rst than expected except if we don't expect any
+@@ -1435,7 +1412,7 @@ chk_rst_nr()
+ fi
+
+ print_check "rstrx"
+- count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
++ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPRstRx")
+ if [ -z "$count" ]; then
+ print_skip
+ # accept more rst than expected except if we don't expect any
+@@ -1456,7 +1433,7 @@ chk_infi_nr()
+ local count
+
+ print_check "itx"
+- count=$(get_counter ${ns2} "MPTcpExtInfiniteMapTx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtInfiniteMapTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$infi_tx" ]; then
+@@ -1466,7 +1443,7 @@ chk_infi_nr()
+ fi
+
+ print_check "infirx"
+- count=$(get_counter ${ns1} "MPTcpExtInfiniteMapRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtInfiniteMapRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$infi_rx" ]; then
+@@ -1495,7 +1472,7 @@ chk_join_nr()
+ fi
+
+ print_check "syn"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinSynRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_nr" ]; then
+@@ -1506,7 +1483,7 @@ chk_join_nr()
+
+ print_check "synack"
+ with_cookie=$(ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies)
+- count=$(get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_ack_nr" ]; then
+@@ -1523,7 +1500,7 @@ chk_join_nr()
+ fi
+
+ print_check "ack"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinAckRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$ack_nr" ]; then
+@@ -1556,8 +1533,8 @@ chk_stale_nr()
+
+ print_check "stale"
+
+- stale_nr=$(get_counter ${ns} "MPTcpExtSubflowStale")
+- recover_nr=$(get_counter ${ns} "MPTcpExtSubflowRecover")
++ stale_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowStale")
++ recover_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowRecover")
+ if [ -z "$stale_nr" ] || [ -z "$recover_nr" ]; then
+ print_skip
+ elif [ $stale_nr -lt $stale_min ] ||
+@@ -1594,7 +1571,7 @@ chk_add_nr()
+ timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
+
+ print_check "add"
+- count=$(get_counter ${ns2} "MPTcpExtAddAddr")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtAddAddr")
+ if [ -z "$count" ]; then
+ print_skip
+ # if the test configured a short timeout tolerate greater then expected
+@@ -1606,7 +1583,7 @@ chk_add_nr()
+ fi
+
+ print_check "echo"
+- count=$(get_counter ${ns1} "MPTcpExtEchoAdd")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtEchoAdd")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$echo_nr" ]; then
+@@ -1617,7 +1594,7 @@ chk_add_nr()
+
+ if [ $port_nr -gt 0 ]; then
+ print_check "pt"
+- count=$(get_counter ${ns2} "MPTcpExtPortAdd")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtPortAdd")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$port_nr" ]; then
+@@ -1627,7 +1604,7 @@ chk_add_nr()
+ fi
+
+ print_check "syn"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_nr" ]; then
+@@ -1638,7 +1615,7 @@ chk_add_nr()
+ fi
+
+ print_check "synack"
+- count=$(get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$syn_ack_nr" ]; then
+@@ -1649,7 +1626,7 @@ chk_add_nr()
+ fi
+
+ print_check "ack"
+- count=$(get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$ack_nr" ]; then
+@@ -1660,7 +1637,7 @@ chk_add_nr()
+ fi
+
+ print_check "syn"
+- count=$(get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mis_syn_nr" ]; then
+@@ -1671,7 +1648,7 @@ chk_add_nr()
+ fi
+
+ print_check "ack"
+- count=$(get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mis_ack_nr" ]; then
+@@ -1693,7 +1670,7 @@ chk_add_tx_nr()
+ timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
+
+ print_check "add TX"
+- count=$(get_counter ${ns1} "MPTcpExtAddAddrTx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtAddAddrTx")
+ if [ -z "$count" ]; then
+ print_skip
+ # if the test configured a short timeout tolerate greater then expected
+@@ -1705,7 +1682,7 @@ chk_add_tx_nr()
+ fi
+
+ print_check "echo TX"
+- count=$(get_counter ${ns2} "MPTcpExtEchoAddTx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtEchoAddTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$echo_tx_nr" ]; then
+@@ -1743,7 +1720,7 @@ chk_rm_nr()
+ fi
+
+ print_check "rm"
+- count=$(get_counter ${addr_ns} "MPTcpExtRmAddr")
++ count=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmAddr")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$rm_addr_nr" ]; then
+@@ -1753,13 +1730,13 @@ chk_rm_nr()
+ fi
+
+ print_check "rmsf"
+- count=$(get_counter ${subflow_ns} "MPTcpExtRmSubflow")
++ count=$(mptcp_lib_get_counter ${subflow_ns} "MPTcpExtRmSubflow")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ -n "$simult" ]; then
+ local cnt suffix
+
+- cnt=$(get_counter ${addr_ns} "MPTcpExtRmSubflow")
++ cnt=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmSubflow")
+
+ # in case of simult flush, the subflow removal count on each side is
+ # unreliable
+@@ -1785,7 +1762,7 @@ chk_rm_tx_nr()
+ local rm_addr_tx_nr=$1
+
+ print_check "rm TX"
+- count=$(get_counter ${ns2} "MPTcpExtRmAddrTx")
++ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtRmAddrTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$rm_addr_tx_nr" ]; then
+@@ -1802,7 +1779,7 @@ chk_prio_nr()
+ local count
+
+ print_check "ptx"
+- count=$(get_counter ${ns1} "MPTcpExtMPPrioTx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioTx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mp_prio_nr_tx" ]; then
+@@ -1812,7 +1789,7 @@ chk_prio_nr()
+ fi
+
+ print_check "prx"
+- count=$(get_counter ${ns1} "MPTcpExtMPPrioRx")
++ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mp_prio_nr_rx" ]; then
+@@ -1912,7 +1889,7 @@ wait_attempt_fail()
+ while [ $time -lt $timeout_ms ]; do
+ local cnt
+
+- cnt=$(get_counter ${ns} "TcpAttemptFails")
++ cnt=$(mptcp_lib_get_counter ${ns} "TcpAttemptFails")
+
+ [ "$cnt" = 1 ] && return 1
+ time=$((time + 100))
+@@ -3430,7 +3407,7 @@ userspace_tests()
+ chk_rm_nr 1 1 invert
+ chk_mptcp_info subflows 0 subflows 0
+ kill_events_pids
+- wait $tests_pid
++ mptcp_lib_kill_wait $tests_pid
+ fi
+
+ # userspace pm create destroy subflow
+@@ -3449,7 +3426,7 @@ userspace_tests()
+ chk_rm_nr 1 1
+ chk_mptcp_info subflows 0 subflows 0
+ kill_events_pids
+- wait $tests_pid
++ mptcp_lib_kill_wait $tests_pid
+ fi
+ }
+
+@@ -3463,7 +3440,8 @@ endpoint_tests()
+ pm_nl_set_limits $ns2 2 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ speed=slow \
+- run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
+
+ wait_mpj $ns1
+ pm_nl_check_endpoint "creation" \
+@@ -3478,7 +3456,7 @@ endpoint_tests()
+ pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
+ pm_nl_check_endpoint "modif is allowed" \
+ $ns2 10.0.2.2 id 1 flags signal
+- kill_tests_wait
++ mptcp_lib_kill_wait $tests_pid
+ fi
+
+ if reset "delete and re-add" &&
+@@ -3487,7 +3465,8 @@ endpoint_tests()
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+ test_linkfail=4 speed=20 \
+- run_tests $ns1 $ns2 10.0.1.1 2>/dev/null &
++ run_tests $ns1 $ns2 10.0.1.1 &
++ local tests_pid=$!
+
+ wait_mpj $ns2
+ chk_subflow_nr "before delete" 2
+@@ -3502,7 +3481,7 @@ endpoint_tests()
+ wait_mpj $ns2
+ chk_subflow_nr "after re-add" 2
+ chk_mptcp_info subflows 1 subflows 1
+- kill_tests_wait
++ mptcp_lib_kill_wait $tests_pid
+ fi
+ }
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+index 4cd4297ca86de..2b10f200de402 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
+@@ -216,3 +216,19 @@ mptcp_lib_kill_wait() {
+ kill "${1}" > /dev/null 2>&1
+ wait "${1}" 2>/dev/null
+ }
++
++# $1: ns, $2: MIB counter
++mptcp_lib_get_counter() {
++ local ns="${1}"
++ local counter="${2}"
++ local count
++
++ count=$(ip netns exec "${ns}" nstat -asz "${counter}" |
++ awk 'NR==1 {next} {print $2}')
++ if [ -z "${count}" ]; then
++ mptcp_lib_fail_if_expected_feature "${counter} counter"
++ return 1
++ fi
++
++ echo "${count}"
++}
+diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+index 8f4ff123a7eb9..71899a3ffa7a9 100755
+--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
++++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
+@@ -183,7 +183,7 @@ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow 10.0.1.1" " (nobackup)"
+
+ # fullmesh support has been added later
+-ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh
++ip netns exec $ns1 ./pm_nl_ctl set id 1 flags fullmesh 2>/dev/null
+ if ip netns exec $ns1 ./pm_nl_ctl dump | grep -q "fullmesh" ||
+ mptcp_lib_expect_all_features; then
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+@@ -194,6 +194,12 @@ subflow 10.0.1.1" " (nofullmesh)"
+ ip netns exec $ns1 ./pm_nl_ctl set id 1 flags backup,fullmesh
+ check "ip netns exec $ns1 ./pm_nl_ctl dump" "id 1 flags \
+ subflow,backup,fullmesh 10.0.1.1" " (backup,fullmesh)"
++else
++ for st in fullmesh nofullmesh backup,fullmesh; do
++ st=" (${st})"
++ printf "%-50s%s\n" "${st}" "[SKIP]"
++ mptcp_lib_result_skip "${st}"
++ done
+ fi
+
+ mptcp_lib_result_print_all_tap
+diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
+index ce9203b817f88..9096bf5794888 100755
+--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
++++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
+@@ -267,7 +267,8 @@ run_test()
+ [ $bail -eq 0 ] || exit $ret
+ fi
+
+- printf "%-60s" "$msg - reverse direction"
++ msg+=" - reverse direction"
++ printf "%-60s" "${msg}"
+ do_transfer $large $small $time
+ lret=$?
+ mptcp_lib_result_code "${lret}" "${msg}"
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index c44bf5c7c6e04..0e748068ee95e 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -75,7 +75,7 @@ print_test()
+ {
+ test_name="${1}"
+
+- _printf "%-63s" "${test_name}"
++ _printf "%-68s" "${test_name}"
+ }
+
+ print_results()
+@@ -555,7 +555,7 @@ verify_subflow_events()
+ local remid
+ local info
+
+- info="${e_saddr} (${e_from}) => ${e_daddr} (${e_to})"
++ info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
+
+ if [ "$e_type" = "$SUB_ESTABLISHED" ]
+ then
+@@ -887,9 +887,10 @@ test_prio()
+
+ # Check TX
+ print_test "MP_PRIO TX"
+- count=$(ip netns exec "$ns2" nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
+- [ -z "$count" ] && count=0
+- if [ $count != 1 ]; then
++ count=$(mptcp_lib_get_counter "$ns2" "MPTcpExtMPPrioTx")
++ if [ -z "$count" ]; then
++ test_skip
++ elif [ $count != 1 ]; then
+ test_fail "Count != 1: ${count}"
+ else
+ test_pass
+@@ -897,9 +898,10 @@ test_prio()
+
+ # Check RX
+ print_test "MP_PRIO RX"
+- count=$(ip netns exec "$ns1" nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
+- [ -z "$count" ] && count=0
+- if [ $count != 1 ]; then
++ count=$(mptcp_lib_get_counter "$ns1" "MPTcpExtMPPrioRx")
++ if [ -z "$count" ]; then
++ test_skip
++ elif [ $count != 1 ]; then
+ test_fail "Count != 1: ${count}"
+ else
+ test_pass
+diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
+index 9b8434f62f570..2e0db9c5be6c3 100644
+--- a/tools/testing/selftests/riscv/mm/mmap_test.h
++++ b/tools/testing/selftests/riscv/mm/mmap_test.h
+@@ -18,6 +18,8 @@ struct addresses {
+ int *on_56_addr;
+ };
+
++// Only works on 64 bit
++#if __riscv_xlen == 64
+ static inline void do_mmaps(struct addresses *mmap_addresses)
+ {
+ /*
+@@ -50,6 +52,7 @@ static inline void do_mmaps(struct addresses *mmap_addresses)
+ mmap_addresses->on_56_addr =
+ mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ }
++#endif /* __riscv_xlen == 64 */
+
+ static inline int memory_layout(void)
+ {
+diff --git a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
+index 66764edb0d526..1dd94197da30c 100644
+--- a/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
++++ b/tools/testing/selftests/riscv/vector/v_initval_nolibc.c
+@@ -27,7 +27,7 @@ int main(void)
+
+ datap = malloc(MAX_VSIZE);
+ if (!datap) {
+- ksft_test_result_fail("fail to allocate memory for size = %lu\n", MAX_VSIZE);
++ ksft_test_result_fail("fail to allocate memory for size = %d\n", MAX_VSIZE);
+ exit(-1);
+ }
+
+diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c
+index b348b475be570..8ad94e08ff4d0 100644
+--- a/tools/testing/selftests/riscv/vector/vstate_prctl.c
++++ b/tools/testing/selftests/riscv/vector/vstate_prctl.c
+@@ -68,7 +68,7 @@ int test_and_compare_child(long provided, long expected, int inherit)
+ }
+ rc = launch_test(inherit);
+ if (rc != expected) {
+- ksft_test_result_fail("Test failed, check %d != %d\n", rc,
++ ksft_test_result_fail("Test failed, check %d != %ld\n", rc,
+ expected);
+ return -2;
+ }
+@@ -87,7 +87,7 @@ int main(void)
+ pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
+ rc = riscv_hwprobe(&pair, 1, 0, NULL, 0);
+ if (rc < 0) {
+- ksft_test_result_fail("hwprobe() failed with %d\n", rc);
++ ksft_test_result_fail("hwprobe() failed with %ld\n", rc);
+ return -1;
+ }
+