From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org, akpm@linux-foundation.org,
torvalds@linux-foundation.org, stable@vger.kernel.org
Cc: lwn@lwn.net, jslaby@suse.cz,
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Subject: Re: Linux 6.12.78
Date: Wed, 25 Mar 2026 11:15:55 +0100 [thread overview]
Message-ID: <2026032555-propeller-snide-52f6@gregkh> (raw)
In-Reply-To: <2026032555-absentee-wafer-f27a@gregkh>
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index edc070c6e19b..d196d128ce98 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8107,6 +8107,58 @@ KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM
                                     or moved memslot isn't reachable, i.e. KVM
_may_ invalidate only SPTEs related to the
memslot.
+
+KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the
+ vCPU's MSR_IA32_PERF_CAPABILITIES (0x345),
+ MSR_IA32_ARCH_CAPABILITIES (0x10a),
+ MSR_PLATFORM_INFO (0xce), and all VMX MSRs
+ (0x480..0x492) to the maximal capabilities
+ supported by KVM. KVM also sets
+ MSR_IA32_UCODE_REV (0x8b) to an arbitrary
+ value (which is different for Intel vs.
+ AMD). Lastly, when guest CPUID is set (by
+ userspace), KVM modifies select VMX MSR
+ fields to force consistency between guest
+ CPUID and L2's effective ISA. When this
+ quirk is disabled, KVM zeroes the vCPU's MSR
+ values (with two exceptions, see below),
+ i.e. treats the feature MSRs like CPUID
+ leaves and gives userspace full control of
+ the vCPU model definition. This quirk does
+ not affect VMX MSRs CR0/CR4_FIXED1 (0x487
+                                 and 0x489), as KVM does not allow them to
+ be set by userspace (KVM sets them based on
+ guest CPUID, for safety purposes).
+
+KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores
+ guest PAT and forces the effective memory
+ type to WB in EPT. The quirk is not available
+ on Intel platforms which are incapable of
+ safely honoring guest PAT (i.e., without CPU
+ self-snoop, KVM always ignores guest PAT and
+ forces effective memory type to WB). It is
+ also ignored on AMD platforms or, on Intel,
+ when a VM has non-coherent DMA devices
+ assigned; KVM always honors guest PAT in
+ such case. The quirk is needed to avoid
+ slowdowns on certain Intel Xeon platforms
+ (e.g. ICX, SPR) where self-snoop feature is
+ supported but UC is slow enough to cause
+ issues with some older guests that use
+ UC instead of WC to map the video RAM.
+ Userspace can disable the quirk to honor
+ guest PAT if it knows that there is no such
+ guest software, for example if it does not
+ expose a bochs graphics device (which is
+ known to have had a buggy driver).
+
+KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM By default, KVM relaxes the consistency
+ check for GUEST_IA32_DEBUGCTL in vmcs12
+ to allow FREEZE_IN_SMM to be set. When
+ this quirk is disabled, KVM requires this
+ bit to be cleared. Note that the vmcs02
+ bit is still completely controlled by the
+ host, regardless of the quirk setting.
=================================== ============================================
7.32 KVM_CAP_MAX_VCPU_ID
diff --git a/Makefile b/Makefile
index 930fcea203d7..80b450e0a7d4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
-SUBLEVEL = 77
+SUBLEVEL = 78
EXTRAVERSION =
NAME = Baby Opossum Posse
@@ -446,6 +446,7 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
export rust_common_flags := --edition=2021 \
-Zbinary_dep_depinfo=y \
-Astable_features \
+ -Aunused_features \
-Dnon_ascii_idents \
-Dunsafe_op_in_unsafe_fn \
-Wmissing_docs \
@@ -1370,13 +1371,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
endif
-PHONY += objtool_clean
+PHONY += objtool_clean objtool_mrproper
objtool_O = $(abspath $(objtree))/tools/objtool
-objtool_clean:
+objtool_clean objtool_mrproper:
ifneq ($(wildcard $(objtool_O)),)
- $(Q)$(MAKE) -sC $(abs_srctree)/tools/objtool O=$(objtool_O) srctree=$(abs_srctree) clean
+ $(Q)$(MAKE) -sC $(abs_srctree)/tools/objtool O=$(objtool_O) srctree=$(abs_srctree) $(patsubst objtool_%,%,$@)
endif
tools/: FORCE
@@ -1547,7 +1548,7 @@ PHONY += $(mrproper-dirs) mrproper
$(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
-mrproper: clean $(mrproper-dirs)
+mrproper: clean objtool_mrproper $(mrproper-dirs)
$(call cmd,rmfiles)
@find . $(RCS_FIND_IGNORE) \
\( -name '*.rmeta' \) \
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 80ceb5bd2680..dd430477e7c1 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -127,29 +127,6 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
}
-static void machine_kexec_mask_interrupts(void)
-{
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
-
- chip = irq_desc_get_chip(desc);
- if (!chip)
- continue;
-
- if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
- }
-}
-
void machine_crash_shutdown(struct pt_regs *regs)
{
local_irq_disable();
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0e2902f38e70..f487c5e21e2f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -146,6 +146,7 @@ config ARM64
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IOREMAP
select GENERIC_IRQ_IPI
+ select GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
index 934bf9cfc5ac..56840b6ed644 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -246,7 +246,6 @@ vreg_l12a_1p8: ldo12 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
- regulator-boot-on;
};
vreg_l14a_1p88: ldo14 {
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
index 1ad5a1b6917f..5c7b9e296f43 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
@@ -201,16 +201,6 @@ ostm7: timer@12c03000 {
status = "disabled";
};
- wdt0: watchdog@11c00400 {
- compatible = "renesas,r9a09g057-wdt";
- reg = <0 0x11c00400 0 0x400>;
- clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>;
- clock-names = "pclk", "oscclk";
- resets = <&cpg 0x75>;
- power-domains = <&cpg>;
- status = "disabled";
- };
-
wdt1: watchdog@14400000 {
compatible = "renesas,r9a09g057-wdt";
reg = <0 0x14400000 0 0x400>;
@@ -221,23 +211,18 @@ wdt1: watchdog@14400000 {
status = "disabled";
};
- wdt2: watchdog@13000000 {
- compatible = "renesas,r9a09g057-wdt";
- reg = <0 0x13000000 0 0x400>;
- clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>;
- clock-names = "pclk", "oscclk";
- resets = <&cpg 0x77>;
- power-domains = <&cpg>;
- status = "disabled";
- };
-
- wdt3: watchdog@13000400 {
- compatible = "renesas,r9a09g057-wdt";
- reg = <0 0x13000400 0 0x400>;
- clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>;
- clock-names = "pclk", "oscclk";
- resets = <&cpg 0x78>;
+ rtc: rtc@11c00800 {
+ compatible = "renesas,r9a09g057-rtca3", "renesas,rz-rtca3";
+ reg = <0 0x11c00800 0 0x400>;
+ interrupts = <GIC_SPI 524 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 525 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 526 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "alarm", "period", "carry";
+ clocks = <&cpg CPG_MOD 0x53>, <&rtxin_clk>;
+ clock-names = "bus", "counter";
power-domains = <&cpg>;
+ resets = <&cpg 0x79>, <&cpg 0x7a>;
+ reset-names = "rtc", "rtest";
status = "disabled";
};
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 3ce7c632fbfb..3bd07d512cde 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -52,11 +52,11 @@
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-#define _PAGE_KERNEL (PROT_NORMAL)
-#define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
-#define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
-#define _PAGE_KERNEL_EXEC (PROT_NORMAL & ~PTE_PXN)
-#define _PAGE_KERNEL_EXEC_CONT ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
+#define _PAGE_KERNEL (PROT_NORMAL | PTE_DIRTY)
+#define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY | PTE_DIRTY)
+#define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY | PTE_DIRTY)
+#define _PAGE_KERNEL_EXEC ((PROT_NORMAL & ~PTE_PXN) | PTE_DIRTY)
+#define _PAGE_KERNEL_EXEC_CONT ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT | PTE_DIRTY)
#define _PAGE_SHARED (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define _PAGE_SHARED_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 82e2203d86a3..6f121a0164a4 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -207,37 +207,6 @@ void machine_kexec(struct kimage *kimage)
BUG(); /* Should never get here. */
}
-static void machine_kexec_mask_interrupts(void)
-{
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
- int ret;
-
- chip = irq_desc_get_chip(desc);
- if (!chip)
- continue;
-
- /*
- * First try to remove the active state. If this
- * fails, try to EOI the interrupt.
- */
- ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
-
- if (ret && irqd_irq_inprogress(&desc->irq_data) &&
- chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
- }
-}
-
/**
* machine_crash_shutdown - shutdown non-crashing cpus and save registers
*/
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index 55107d27d3f8..726c56aa2dfc 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -390,6 +390,27 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes);
+static bool contpte_all_subptes_match_access_flags(pte_t *ptep, pte_t entry)
+{
+ pte_t *cont_ptep = contpte_align_down(ptep);
+ /*
+ * PFNs differ per sub-PTE. Match only bits consumed by
+ * __ptep_set_access_flags(): AF, DIRTY and write permission.
+ */
+ const pteval_t cmp_mask = PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;
+ pteval_t entry_cmp = pte_val(entry) & cmp_mask;
+ int i;
+
+ for (i = 0; i < CONT_PTES; i++) {
+ pteval_t pte_cmp = pte_val(__ptep_get(cont_ptep + i)) & cmp_mask;
+
+ if (pte_cmp != entry_cmp)
+ return false;
+ }
+
+ return true;
+}
+
int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t entry, int dirty)
@@ -399,13 +420,37 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
int i;
/*
- * Gather the access/dirty bits for the contiguous range. If nothing has
- * changed, its a noop.
+ * Check whether all sub-PTEs in the CONT block already match the
+ * requested access flags/write permission, using raw per-PTE values
+ * rather than the gathered ptep_get() view.
+ *
+ * __ptep_set_access_flags() can update AF, dirty and write
+ * permission, but only to make the mapping more permissive.
+ *
+ * ptep_get() gathers AF/dirty state across the whole CONT block,
+ * which is correct for a CPU with FEAT_HAFDBS. But page-table
+ * walkers that evaluate each descriptor individually (e.g. a CPU
+ * without DBM support, or an SMMU without HTTU, or with HA/HD
+ * disabled in CD.TCR) can keep faulting on the target sub-PTE if
+ * only a sibling has been updated. Gathering can therefore cause
+ * false no-ops when only a sibling has been updated:
+ * - write faults: target still has PTE_RDONLY (needs PTE_RDONLY cleared)
+ * - read faults: target still lacks PTE_AF
+ *
+ * Per Arm ARM (DDI 0487) D8.7.1, any sub-PTE in a CONT range may
+ * become the effective cached translation, so all entries must have
+ * consistent attributes. Check the full CONT block before returning
+ * no-op, and when any sub-PTE mismatches, proceed to update the whole
+ * range.
*/
- orig_pte = pte_mknoncont(ptep_get(ptep));
- if (pte_val(orig_pte) == pte_val(entry))
+ if (contpte_all_subptes_match_access_flags(ptep, entry))
return 0;
+ /*
+ * Use raw target pte (not gathered) for write-bit unfold decision.
+ */
+ orig_pte = pte_mknoncont(__ptep_get(ptep));
+
/*
* We can fix up access/dirty bits without having to unfold the contig
* range. But if the write bit is changing, we must unfold.
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 0d22991ae430..a9b40d231343 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -196,8 +196,13 @@ do { \
\
__get_kernel_common(*((type *)(dst)), sizeof(type), \
(__force type *)(src)); \
- if (unlikely(__gu_err)) \
+ if (unlikely(__gu_err)) { \
+ pr_info("%s: memory access failed, ecode 0x%x\n", \
+ __func__, read_csr_excode()); \
+ pr_info("%s: the caller is %pS\n", \
+ __func__, __builtin_return_address(0)); \
goto err_label; \
+ } \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -207,8 +212,13 @@ do { \
\
__pu_val = *(__force type *)(src); \
__put_kernel_common(((type *)(dst)), sizeof(type)); \
- if (unlikely(__pu_err)) \
+ if (unlikely(__pu_err)) { \
+ pr_info("%s: memory access failed, ecode 0x%x\n", \
+ __func__, read_csr_excode()); \
+ pr_info("%s: the caller is %pS\n", \
+ __func__, __builtin_return_address(0)); \
goto err_label; \
+ } \
} while (0)
extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 3446a5e2520b..93772c82c62f 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -85,7 +85,7 @@ extern void __update_cache(pte_t pte);
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#if defined(CONFIG_64BIT)
+#if defined(CONFIG_64BIT) || defined(CONFIG_KALLSYMS)
#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 37ca484cc495..ce545176378e 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -953,7 +953,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
#else
"1: cmpb,<<,n %0,%2,1b\n"
#endif
- " fic,m %3(%4,%0)\n"
+ " fdc,m %3(%4,%0)\n"
"2: sync\n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
: "+r" (start), "+r" (error)
@@ -968,7 +968,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
#else
"1: cmpb,<<,n %0,%2,1b\n"
#endif
- " fdc,m %3(%4,%0)\n"
+ " fic,m %3(%4,%0)\n"
"2: sync\n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
: "+r" (start), "+r" (error)
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 96e0264ac961..9188c8d87437 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -56,6 +56,7 @@ ENTRY(parisc_kernel_start)
.import __bss_start,data
.import __bss_stop,data
+ .import __end,data
load32 PA(__bss_start),%r3
load32 PA(__bss_stop),%r4
@@ -149,7 +150,11 @@ $cpu_ok:
* everything ... it will get remapped correctly later */
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
- load32 PA(pg0),%r1
+ load32 PA(_end),%r1
+ SHRREG %r1,PAGE_SHIFT,%r1 /* %r1 is PFN count for _end symbol */
+ cmpb,<<,n %r11,%r1,1f
+ copy %r1,%r11 /* %r1 PFN count smaller than %r11 */
+1: load32 PA(pg0),%r1
$pgt_fill_loop:
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index ace483b6f19a..d3e17a7a8901 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -120,14 +120,6 @@ void __init setup_arch(char **cmdline_p)
#endif
printk(KERN_CONT ".\n");
- /*
- * Check if initial kernel page mappings are sufficient.
- * panic early if not, else we may access kernel functions
- * and variables which can't be reached.
- */
- if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
- panic("KERNEL_INITIAL_ORDER too small!");
-
#ifdef CONFIG_64BIT
if(parisc_narrow_firmware) {
printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
@@ -279,6 +271,18 @@ void __init start_parisc(void)
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
+ /*
+ * Check if initial kernel page mapping is sufficient.
+ * Print warning if not, because we may access kernel functions and
+ * variables which can't be reached yet through the initial mappings.
+ * Note that the panic() and printk() functions are not functional
+ * yet, so we need to use direct iodc() firmware calls instead.
+ */
+ const char warn1[] = "CRITICAL: Kernel may crash because "
+ "KERNEL_INITIAL_ORDER is too small.\n";
+ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+ pdc_iodc_print(warn1, sizeof(warn1) - 1);
+
/* check QEMU/SeaBIOS marker in PAGE0 */
running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 270ee93a0f7d..601e569303e1 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -61,7 +61,6 @@ struct pt_regs;
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern void default_machine_kexec(struct kimage *image);
-extern void machine_kexec_mask_interrupts(void);
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 3987a5c33558..929f7050c73a 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -253,7 +253,7 @@ __gus_failed: \
".section .fixup,\"ax\"\n" \
"4: li %0,%3\n" \
" li %1,0\n" \
- " li %1+1,0\n" \
+ " li %L1,0\n" \
" b 3b\n" \
".previous\n" \
EX_TABLE(1b, 4b) \
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index b8333a49ea5d..31797f2145ec 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -22,27 +22,8 @@
#include <asm/setup.h>
#include <asm/firmware.h>
-void machine_kexec_mask_interrupts(void) {
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
-
- chip = irq_desc_get_chip(desc);
- if (!chip)
- continue;
-
- if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
- }
-}
+#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
+#define __be_word __PASTE(__be, BITS_PER_LONG)
#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
@@ -178,37 +159,28 @@ int __init overlaps_crashkernel(unsigned long start, unsigned long size)
}
/* Values we need to export to the second kernel via the device tree. */
-static phys_addr_t kernel_end;
-static phys_addr_t crashk_base;
-static phys_addr_t crashk_size;
-static unsigned long long mem_limit;
-
-static struct property kernel_end_prop = {
- .name = "linux,kernel-end",
- .length = sizeof(phys_addr_t),
- .value = &kernel_end,
-};
+static __be_word crashk_base;
+static __be_word crashk_size;
+static __be_word mem_limit;
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
- .length = sizeof(phys_addr_t),
+ .length = sizeof(__be_word),
.value = &crashk_base
};
static struct property crashk_size_prop = {
.name = "linux,crashkernel-size",
- .length = sizeof(phys_addr_t),
+ .length = sizeof(__be_word),
.value = &crashk_size,
};
static struct property memory_limit_prop = {
.name = "linux,memory-limit",
- .length = sizeof(unsigned long long),
+ .length = sizeof(__be_word),
.value = &mem_limit,
};
-#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
-
static void __init export_crashk_values(struct device_node *node)
{
/* There might be existing crash kernel properties, but we can't
@@ -232,6 +204,15 @@ static void __init export_crashk_values(struct device_node *node)
mem_limit = cpu_to_be_ulong(memory_limit);
of_update_property(node, &memory_limit_prop);
}
+#endif /* CONFIG_CRASH_RESERVE */
+
+static __be_word kernel_end;
+
+static struct property kernel_end_prop = {
+ .name = "linux,kernel-end",
+ .length = sizeof(__be_word),
+ .value = &kernel_end,
+};
static int __init kexec_setup(void)
{
@@ -242,16 +223,17 @@ static int __init kexec_setup(void)
return -ENOENT;
/* remove any stale properties so ours can be found */
- of_remove_property(node, of_find_property(node, kernel_end_prop.name, NULL));
+ of_remove_property(node, of_find_property(node, kernel_end_prop.name,
+ NULL));
/* information needed by userspace when using default_machine_kexec */
kernel_end = cpu_to_be_ulong(__pa(_end));
of_add_property(node, &kernel_end_prop);
+#ifdef CONFIG_CRASH_RESERVE
export_crashk_values(node);
-
+#endif
of_node_put(node);
return 0;
}
late_initcall(kexec_setup);
-#endif /* CONFIG_CRASH_RESERVE */
diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c
index c95f96850c9e..deb28eb44f30 100644
--- a/arch/powerpc/kexec/core_32.c
+++ b/arch/powerpc/kexec/core_32.c
@@ -7,6 +7,7 @@
* Copyright (C) 2005 IBM Corporation.
*/
+#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/string.h>
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index dc65c1391157..248a0f00a291 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -633,6 +633,11 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
kbuf->buffer = headers;
kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf->bufsz = headers_sz;
+
+ /*
+ * Account for extra space required to accommodate additional memory
+ * ranges in elfcorehdr due to memory hotplug events.
+ */
kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
kbuf->top_down = false;
@@ -643,7 +648,14 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
}
image->elf_load_addr = kbuf->mem;
- image->elf_headers_sz = headers_sz;
+
+ /*
+ * If CONFIG_CRASH_HOTPLUG is enabled, the elfcorehdr kexec segment
+ * memsz can be larger than bufsz. Always initialize elf_headers_sz
+ * with memsz. This ensures the correct size is reserved for elfcorehdr
+ * memory in the FDT prepared for kdump.
+ */
+ image->elf_headers_sz = kbuf->memsz;
image->elf_headers = headers;
out:
kfree(cmem);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2a36cc2e7e9e..55c3b64a5f3a 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -362,7 +362,7 @@ void bpf_jit_free(struct bpf_prog *fp)
bool bpf_jit_supports_kfunc_call(void)
{
- return true;
+ return IS_ENABLED(CONFIG_PPC64);
}
bool bpf_jit_supports_far_kfunc_call(void)
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 2cbcdf93cc19..ce9d946fe3c1 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -202,14 +202,22 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-static int
-bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
long reladdr;
- if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
- return -EINVAL;
+ /* bpf to bpf call, func is not known in the initial pass. Emit 5 nops as a placeholder */
+ if (!func) {
+ for (int i = 0; i < 5; i++)
+ EMIT(PPC_RAW_NOP());
+ /* elfv1 needs an additional instruction to load addr from descriptor */
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+ return 0;
+ }
#ifdef CONFIG_PPC_KERNEL_PCREL
reladdr = func_addr - local_paca->kernelbase;
@@ -266,7 +274,8 @@ bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx,
* We can clobber r2 since we get called through a
* function pointer (so caller will save/restore r2).
*/
- EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
+ if (is_module_text_address(func_addr))
+ EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
} else {
PPC_LI64(_R12, func);
EMIT(PPC_RAW_MTCTR(_R12));
@@ -276,42 +285,87 @@ bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx,
* Load r2 with kernel TOC as kernel TOC is used if function address falls
* within core kernel text.
*/
- EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
+ if (is_module_text_address(func_addr))
+ EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
}
#endif
return 0;
}
-int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
+static int zero_extend(u32 *image, struct codegen_context *ctx, u32 src_reg, u32 dst_reg, u32 size)
{
- unsigned int i, ctx_idx = ctx->idx;
-
- if (WARN_ON_ONCE(func && is_module_text_address(func)))
- return -EINVAL;
-
- /* skip past descriptor if elf v1 */
- func += FUNCTION_DESCR_SIZE;
-
- /* Load function address into r12 */
- PPC_LI64(_R12, func);
-
- /* For bpf-to-bpf function calls, the callee's address is unknown
- * until the last extra pass. As seen above, we use PPC_LI64() to
- * load the callee's address, but this may optimize the number of
- * instructions required based on the nature of the address.
- *
- * Since we don't want the number of instructions emitted to increase,
- * we pad the optimized PPC_LI64() call with NOPs to guarantee that
- * we always have a five-instruction sequence, which is the maximum
- * that PPC_LI64() can emit.
- */
- if (!image)
- for (i = ctx->idx - ctx_idx; i < 5; i++)
- EMIT(PPC_RAW_NOP());
+ switch (size) {
+ case 1:
+ /* zero-extend 8 bits into 64 bits */
+ EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 56));
+ return 0;
+ case 2:
+ /* zero-extend 16 bits into 64 bits */
+ EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 48));
+ return 0;
+ case 4:
+ /* zero-extend 32 bits into 64 bits */
+ EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 32));
+ fallthrough;
+ case 8:
+ /* Nothing to do */
+ return 0;
+ default:
+ return -1;
+ }
+}
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTRL());
+static int sign_extend(u32 *image, struct codegen_context *ctx, u32 src_reg, u32 dst_reg, u32 size)
+{
+ switch (size) {
+ case 1:
+ /* sign-extend 8 bits into 64 bits */
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ return 0;
+ case 2:
+ /* sign-extend 16 bits into 64 bits */
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
+ return 0;
+ case 4:
+ /* sign-extend 32 bits into 64 bits */
+ EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
+ fallthrough;
+ case 8:
+ /* Nothing to do */
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Handle powerpc ABI expectations from caller:
+ * - Unsigned arguments are zero-extended.
+ * - Signed arguments are sign-extended.
+ */
+static int prepare_for_kfunc_call(const struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx,
+ const struct bpf_insn *insn)
+{
+ const struct btf_func_model *m = bpf_jit_find_kfunc_model(fp, insn);
+ int i;
+
+ if (!m)
+ return -1;
+
+ for (i = 0; i < m->nr_args; i++) {
+ /* Note that BPF ABI only allows up to 5 args for kfuncs */
+ u32 reg = bpf_to_ppc(BPF_REG_1 + i), size = m->arg_size[i];
+
+ if (!(m->arg_flags[i] & BTF_FMODEL_SIGNED_ARG)) {
+ if (zero_extend(image, ctx, reg, reg, size))
+ return -1;
+ } else {
+ if (sign_extend(image, ctx, reg, reg, size))
+ return -1;
+ }
+ }
return 0;
}
@@ -701,14 +755,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* special mov32 for zext */
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
- } else if (off == 8) {
- EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
- } else if (off == 16) {
- EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
- } else if (off == 32) {
- EMIT(PPC_RAW_EXTSW(dst_reg, src_reg));
- } else if (dst_reg != src_reg)
- EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ }
+ if (off == 0) {
+ /* MOV */
+ if (dst_reg != src_reg)
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ } else {
+ /* MOVSX: dst = (s8,s16,s32)src (off = 8,16,32) */
+ if (sign_extend(image, ctx, src_reg, dst_reg, off / 8))
+ return -1;
+ }
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
@@ -1102,11 +1158,13 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (ret < 0)
return ret;
- if (func_addr_fixed)
- ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
- else
- ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
+ /* Take care of powerpc ABI requirements before kfunc call */
+ if (insn[i].src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ if (prepare_for_kfunc_call(fp, image, ctx, &insn[i]))
+ return -1;
+ }
+ ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
if (ret)
return ret;
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 2b5d187d9b62..9ef8fb39dd1b 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -155,8 +155,8 @@ machine_device_initcall(mpc83xx_km, mpc83xx_declare_of_platform_devices);
/* list of the supported boards */
static char *board[] __initdata = {
- "Keymile,KMETER1",
- "Keymile,kmpbec8321",
+ "keymile,KMETER1",
+ "keymile,kmpbec8321",
NULL
};
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index 3c830a6f7ef4..2306ce3e5f22 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -114,29 +114,6 @@ void machine_shutdown(void)
#endif
}
-static void machine_kexec_mask_interrupts(void)
-{
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
-
- chip = irq_desc_get_chip(desc);
- if (!chip)
- continue;
-
- if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
- }
-}
-
/*
* machine_crash_shutdown - Prepare to kexec after a kernel crash
*
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 21ae93cbd8e4..ef622c3f88e5 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -168,7 +168,7 @@ static __always_inline void __stackleak_poison(unsigned long erase_low,
" j 4f\n"
"3: mvc 8(1,%[addr]),0(%[addr])\n"
"4:\n"
- : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp)
+ : [addr] "+&a" (erase_low), [count] "+&a" (count), [tmp] "=&a" (tmp)
: [poison] "d" (poison)
: "memory", "cc"
);
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index fb924a8041dc..76d7ca64d231 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -29,8 +29,8 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
"3:\n"
- : : "d" (bytes), "a" (p1), "a" (p2)
- : "0", "1", "cc", "memory");
+ : "+d" (bytes), "+a" (p1), "+a" (p2)
+ : : "0", "1", "cc", "memory");
}
static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
diff --git a/arch/s390/mm/pfault.c b/arch/s390/mm/pfault.c
index 1aac13bb8f53..93690bb5dc1b 100644
--- a/arch/s390/mm/pfault.c
+++ b/arch/s390/mm/pfault.c
@@ -61,7 +61,7 @@ int __pfault_init(void)
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
: [rc] "+d" (rc)
- : [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
+ : [refbk] "a" (virt_to_phys(&pfault_init_refbk)), "m" (pfault_init_refbk)
: "cc");
return rc;
}
@@ -83,7 +83,7 @@ void __pfault_fini(void)
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:
- : [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
+ : [refbk] "a" (virt_to_phys(&pfault_fini_refbk)), "m" (pfault_fini_refbk)
: "cc");
}
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index a93e36338866..25601e65b387 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -357,6 +357,7 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
MSR_AMD64_SNP_VMSA_REG_PROT | \
MSR_AMD64_SNP_RESERVED_BIT13 | \
MSR_AMD64_SNP_RESERVED_BIT15 | \
+ MSR_AMD64_SNP_RESERVED_BITS18_22 | \
MSR_AMD64_SNP_RESERVED_MASK)
/*
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index de1df0cb45da..d5329211b1a7 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -78,6 +78,7 @@ static const char * const sev_status_feat_names[] = {
[MSR_AMD64_SNP_IBS_VIRT_BIT] = "IBSVirt",
[MSR_AMD64_SNP_VMSA_REG_PROT_BIT] = "VMSARegProt",
[MSR_AMD64_SNP_SMT_PROT_BIT] = "SMTProt",
+ [MSR_AMD64_SNP_IBPB_ON_ENTRY_BIT] = "IBPBOnEntry",
};
/* For early boot hypervisor communication in SEV-ES enabled guests */
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 4a57a9948c74..ddde2f1d0bd2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4031,6 +4031,17 @@ static u64 intel_pmu_freq_start_period(struct perf_event *event)
return start;
}
+static inline int intel_set_branch_counter_constr(struct perf_event *event,
+ int *num)
+{
+ if (branch_sample_call_stack(event))
+ return -EINVAL;
+ if (branch_sample_counters(event))
+ (*num)++;
+
+ return 0;
+}
+
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -4090,17 +4101,19 @@ static int intel_pmu_hw_config(struct perf_event *event)
* group, which requires the extra space to store the counters.
*/
leader = event->group_leader;
- if (branch_sample_call_stack(leader))
+ if (intel_set_branch_counter_constr(leader, &num))
return -EINVAL;
- if (branch_sample_counters(leader))
- num++;
leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
for_each_sibling_event(sibling, leader) {
- if (branch_sample_call_stack(sibling))
+ if (intel_set_branch_counter_constr(sibling, &num))
+ return -EINVAL;
+ }
+
+ /* event isn't installed as a sibling yet. */
+ if (event != leader) {
+ if (intel_set_branch_counter_constr(event, &num))
return -EINVAL;
- if (branch_sample_counters(sibling))
- num++;
}
if (num > fls(x86_pmu.lbr_counters))
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 543609d1231e..c453ee7a5207 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -6597,17 +6597,8 @@ void spr_uncore_mmio_init(void)
/* GNR uncore support */
#define UNCORE_GNR_NUM_UNCORE_TYPES 23
-#define UNCORE_GNR_TYPE_15 15
-#define UNCORE_GNR_B2UPI 18
-#define UNCORE_GNR_TYPE_21 21
-#define UNCORE_GNR_TYPE_22 22
int gnr_uncore_units_ignore[] = {
- UNCORE_SPR_UPI,
- UNCORE_GNR_TYPE_15,
- UNCORE_GNR_B2UPI,
- UNCORE_GNR_TYPE_21,
- UNCORE_GNR_TYPE_22,
UNCORE_IGNORE_END
};
@@ -6616,6 +6607,57 @@ static struct intel_uncore_type gnr_uncore_ubox = {
.attr_update = uncore_alias_groups,
};
+static struct uncore_event_desc gnr_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x01,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0, "event=0x05,umask=0xcf"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.unit, "MiB"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1, "event=0x06,umask=0xcf"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.unit, "MiB"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0, "event=0x05,umask=0xf0"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.unit, "MiB"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1, "event=0x06,umask=0xf0"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.scale, "6.103515625e-5"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.unit, "MiB"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type gnr_uncore_imc = {
+ SPR_UNCORE_MMIO_COMMON_FORMAT(),
+ .name = "imc",
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
+ .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
+ .event_descs = gnr_uncore_imc_events,
+};
+
+static struct intel_uncore_type gnr_uncore_pciex8 = {
+ SPR_UNCORE_PCI_COMMON_FORMAT(),
+ .name = "pciex8",
+};
+
+static struct intel_uncore_type gnr_uncore_pciex16 = {
+ SPR_UNCORE_PCI_COMMON_FORMAT(),
+ .name = "pciex16",
+};
+
+static struct intel_uncore_type gnr_uncore_upi = {
+ SPR_UNCORE_PCI_COMMON_FORMAT(),
+ .name = "upi",
+};
+
+static struct intel_uncore_type gnr_uncore_b2upi = {
+ SPR_UNCORE_PCI_COMMON_FORMAT(),
+ .name = "b2upi",
+};
+
+static struct intel_uncore_type gnr_uncore_b2hot = {
+ .name = "b2hot",
+ .attr_update = uncore_alias_groups,
+};
+
static struct intel_uncore_type gnr_uncore_b2cmi = {
SPR_UNCORE_PCI_COMMON_FORMAT(),
.name = "b2cmi",
@@ -6638,23 +6680,23 @@ static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
NULL,
&spr_uncore_pcu,
&gnr_uncore_ubox,
- &spr_uncore_imc,
- NULL,
- NULL,
- NULL,
- NULL,
+ &gnr_uncore_imc,
NULL,
+ &gnr_uncore_upi,
NULL,
NULL,
NULL,
+ &spr_uncore_cxlcm,
+ &spr_uncore_cxldp,
NULL,
+ &gnr_uncore_b2hot,
&gnr_uncore_b2cmi,
&gnr_uncore_b2cxl,
- NULL,
+ &gnr_uncore_b2upi,
NULL,
&gnr_uncore_mdf_sbo,
- NULL,
- NULL,
+ &gnr_uncore_pciex16,
+ &gnr_uncore_pciex8,
};
static struct freerunning_counters gnr_iio_freerunning[] = {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c6c8c21106ef..bb04de781b69 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2385,7 +2385,14 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT | \
KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \
- KVM_X86_QUIRK_SLOT_ZAP_ALL)
+ KVM_X86_QUIRK_SLOT_ZAP_ALL | \
+ KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \
+ KVM_X86_QUIRK_IGNORE_GUEST_PAT | \
+ KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM)
+
+#define KVM_X86_CONDITIONAL_QUIRKS \
+ (KVM_X86_QUIRK_CD_NW_CLEARED | \
+ KVM_X86_QUIRK_IGNORE_GUEST_PAT)
/*
* KVM previously used a u32 field in kvm_run to indicate the hypercall was
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b67280d761f6..7604161a7785 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -691,7 +691,10 @@
#define MSR_AMD64_SNP_VMSA_REG_PROT BIT_ULL(MSR_AMD64_SNP_VMSA_REG_PROT_BIT)
#define MSR_AMD64_SNP_SMT_PROT_BIT 17
#define MSR_AMD64_SNP_SMT_PROT BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT)
-#define MSR_AMD64_SNP_RESV_BIT 18
+#define MSR_AMD64_SNP_RESERVED_BITS18_22 GENMASK_ULL(22, 18)
+#define MSR_AMD64_SNP_IBPB_ON_ENTRY_BIT 23
+#define MSR_AMD64_SNP_IBPB_ON_ENTRY BIT_ULL(MSR_AMD64_SNP_IBPB_ON_ENTRY_BIT)
+#define MSR_AMD64_SNP_RESV_BIT 24
#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a8debbf2f702..64cdf9763c0e 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -440,6 +440,9 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
+#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
+#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)
+#define KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM (1 << 10)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index c5fb28e6451a..21dc37cca20e 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1887,6 +1887,7 @@ void __init check_x2apic(void)
static inline void try_to_enable_x2apic(int remap_mode) { }
static inline void __x2apic_enable(void) { }
+static inline void __x2apic_disable(void) { }
#endif /* !CONFIG_X86_X2APIC */
void __init enable_IR_x2apic(void)
@@ -2449,6 +2450,11 @@ static void lapic_resume(void)
if (x2apic_mode) {
__x2apic_enable();
} else {
+ if (x2apic_enabled()) {
+ pr_warn_once("x2apic: re-enabled by firmware during resume. Disabling\n");
+ __x2apic_disable();
+ }
+
/*
* Make sure the APICBASE points to the right address
*
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 7fef504ca508..7b781f4fa104 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1708,8 +1708,22 @@ static void __init uv_system_init_hub(void)
struct uv_hub_info_s *new_hub;
/* Allocate & fill new per hub info list */
- new_hub = (bid == 0) ? &uv_hub_info_node0
- : kzalloc_node(bytes, GFP_KERNEL, uv_blade_to_node(bid));
+ if (bid == 0) {
+ new_hub = &uv_hub_info_node0;
+ } else {
+ int nid;
+
+ /*
+ * Deconfigured sockets are mapped to SOCK_EMPTY. Use
+ * NUMA_NO_NODE to allocate on a valid node.
+ */
+ nid = uv_blade_to_node(bid);
+ if (nid == SOCK_EMPTY)
+ nid = NUMA_NO_NODE;
+
+ new_hub = kzalloc_node(bytes, GFP_KERNEL, nid);
+ }
+
if (WARN_ON_ONCE(!new_hub)) {
/* do not kfree() bid 0, which is statically allocated */
while (--bid > 0)
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 9194695662b2..6abb25ca9cd1 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -1223,3 +1223,27 @@ bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
else
return regs->sp <= ret->stack;
}
+
+#ifdef CONFIG_IA32_EMULATION
+unsigned long arch_uprobe_get_xol_area(void)
+{
+ struct thread_info *ti = current_thread_info();
+ unsigned long vaddr;
+
+ /*
+ * HACK: we are not in a syscall, but x86 get_unmapped_area() paths
+ * ignore TIF_ADDR32 and rely on in_32bit_syscall() to calculate
+ * vm_unmapped_area_info.high_limit.
+ *
+ * The #ifdef above doesn't cover the CONFIG_X86_X32_ABI=y case,
+ * but in this case in_32bit_syscall() -> in_x32_syscall() always
+ * (falsely) returns true because ->orig_ax == -1.
+ */
+ if (test_thread_flag(TIF_ADDR32))
+ ti->status |= TS_COMPAT;
+ vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
+ ti->status &= ~TS_COMPAT;
+
+ return vaddr;
+}
+#endif
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e9322358678b..1d9c919c9232 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -222,7 +222,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return -(u32)fault & errcode;
}
-bool kvm_mmu_may_ignore_guest_pat(void);
+bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 700926eb77df..31921b6658dd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4713,17 +4713,19 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
}
#endif
-bool kvm_mmu_may_ignore_guest_pat(void)
+bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm)
{
/*
* When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
* has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
* honor the memtype from the guest's PAT so that guest accesses to
* memory that is DMA'd aren't cached against the guest's wishes. As a
- * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
- * KVM _always_ ignores guest PAT (when EPT is enabled).
+	 * result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
+	 * KVM _always_ ignores guest PAT when EPT is enabled and when quirk
+ * KVM_X86_QUIRK_IGNORE_GUEST_PAT is enabled or the CPU lacks the
+ * ability to safely honor guest PAT.
*/
- return shadow_memtype_mask;
+ return kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT);
}
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 63dea8ecd7ef..d539e95a2f8d 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -82,15 +82,34 @@ struct amd_svm_iommu_ir {
void *data; /* Storing pointer to struct amd_ir_data */
};
+static u32 avic_get_max_physical_id(struct kvm_vcpu *vcpu)
+{
+ u32 arch_max;
+
+ if (x2avic_enabled && apic_x2apic_mode(vcpu->arch.apic))
+ arch_max = X2AVIC_MAX_PHYSICAL_ID;
+ else
+ arch_max = AVIC_MAX_PHYSICAL_ID;
+
+ /*
+ * Despite its name, KVM_CAP_MAX_VCPU_ID represents the maximum APIC ID
+ * plus one, so the max possible APIC ID is one less than that.
+ */
+ return min(vcpu->kvm->arch.max_vcpu_ids - 1, arch_max);
+}
+
static void avic_activate_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
-
+ vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu);
vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+ svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
+
/*
* Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR
* accesses, while interrupt injection to a running vCPU can be
@@ -100,7 +119,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
*/
if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
vmcb->control.int_ctl |= X2APIC_MODE_MASK;
- vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
+
/* Disabling MSR intercept for x2APIC registers */
svm_set_x2apic_msr_interception(svm, false);
} else {
@@ -110,8 +129,6 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
*/
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
- /* For xAVIC and hybrid-xAVIC modes */
- vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
/* Enabling MSR intercept for x2APIC registers */
svm_set_x2apic_msr_interception(svm, true);
}
@@ -124,6 +141,9 @@ static void avic_deactivate_vmcb(struct vcpu_svm *svm)
vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
+ if (!sev_es_guest(svm->vcpu.kvm))
+ svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
+
/*
* If running nested and the guest uses its own MSR bitmap, there
* is no need to update L0's msr bitmap
@@ -253,7 +273,7 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;
- if (kvm_apicv_activated(svm->vcpu.kvm))
+ if (kvm_vcpu_apicv_active(&svm->vcpu))
avic_activate_vmcb(svm);
else
avic_deactivate_vmcb(svm);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 853a86dfc8f1..fb9e62a167b8 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1246,8 +1246,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
- if (!kvm_vcpu_apicv_active(vcpu))
- svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
+ svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercepts(svm);
@@ -1360,7 +1359,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
- if (kvm_vcpu_apicv_active(vcpu))
+ if (enable_apicv && irqchip_in_kernel(vcpu->kvm))
avic_init_vmcb(svm, vmcb);
if (vnmi)
@@ -1390,7 +1389,9 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
svm_init_osvw(vcpu);
- vcpu->arch.microcode_version = 0x01000065;
+
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
+ vcpu->arch.microcode_version = 0x01000065;
svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
svm->nmi_masked = false;
@@ -2862,9 +2863,11 @@ static int dr_interception(struct kvm_vcpu *vcpu)
static int cr8_write_interception(struct kvm_vcpu *vcpu)
{
+ u8 cr8_prev = kvm_get_cr8(vcpu);
int r;
- u8 cr8_prev = kvm_get_cr8(vcpu);
+ WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu));
+
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(vcpu);
if (lapic_in_kernel(vcpu))
@@ -5560,6 +5563,7 @@ static __init int svm_hardware_setup(void)
*/
allow_smaller_maxphyaddr = !npt_enabled;
+ kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED;
return 0;
err:
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 76f9624d4938..1a7a12af4a3a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3022,6 +3022,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
return -EINVAL;
+ if (CC(vmcs12->host_cr4 & X86_CR4_CET && !(vmcs12->host_cr0 & X86_CR0_WP)))
+ return -EINVAL;
+
if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
return -EINVAL;
@@ -3136,11 +3139,28 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
return -EINVAL;
- if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
- (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
- CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false))))
+ if (CC(vmcs12->guest_cr4 & X86_CR4_CET && !(vmcs12->guest_cr0 & X86_CR0_WP)))
return -EINVAL;
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ u64 debugctl = vmcs12->guest_ia32_debugctl;
+
+ /*
+ * FREEZE_IN_SMM is not virtualized, but allow L1 to set it in
+ * vmcs12's DEBUGCTL under a quirk for backwards compatibility.
+ * Note that the quirk only relaxes the consistency check. The
+ * vmcc02 bit is still under the control of the host. In
+ * particular, if a host administrator decides to clear the bit,
+ * then L1 has no say in the matter.
+ */
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM))
+ debugctl &= ~DEBUGCTLMSR_FREEZE_IN_SMM;
+
+ if (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
+ CC(!vmx_is_valid_debugctl(vcpu, debugctl, false)))
+ return -EINVAL;
+ }
+
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
return -EINVAL;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 412b4fb8a143..b8aa9ef73e7a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4562,7 +4562,8 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
* Update the nested MSR settings so that a nested VMM can/can't set
* controls for features that are/aren't exposed to the guest.
*/
- if (nested) {
+ if (nested &&
+ kvm_check_has_quirk(vmx->vcpu.kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
/*
* All features that can be added or removed to VMX MSRs must
* be supported in the first place for nested virtualization.
@@ -4853,7 +4854,8 @@ static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
init_vmcs(vmx);
- if (nested)
+ if (nested &&
+ kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
vcpu_setup_sgx_lepubkeyhash(vcpu);
@@ -4866,7 +4868,8 @@ static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
#endif
- vcpu->arch.microcode_version = 0x100000000ULL;
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS))
+ vcpu->arch.microcode_version = 0x100000000ULL;
vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
/*
@@ -7662,6 +7665,17 @@ int vmx_vm_init(struct kvm *kvm)
return 0;
}
+static inline bool vmx_ignore_guest_pat(struct kvm *kvm)
+{
+ /*
+	 * Non-coherent DMA devices need the guest to flush CPU caches properly.
+ * In that case it is not possible to map all guest RAM as WB, so
+ * always trust guest PAT.
+ */
+ return !kvm_arch_has_noncoherent_dma(kvm) &&
+ kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT);
+}
+
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
/*
@@ -7671,13 +7685,8 @@ u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (is_mmio)
return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
- /*
- * Force WB and ignore guest PAT if the VM does NOT have a non-coherent
- * device attached. Letting the guest control memory types on Intel
- * CPUs may result in unexpected behavior, and so KVM's ABI is to trust
- * the guest to behave only as a last resort.
- */
- if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ /* Force WB if ignoring guest PAT */
+ if (vmx_ignore_guest_pat(vcpu->kvm))
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT);
@@ -8576,6 +8585,27 @@ __init int vmx_hardware_setup(void)
kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
+ /*
+ * On Intel CPUs that lack self-snoop feature, letting the guest control
+ * memory types may result in unexpected behavior. So always ignore guest
+ * PAT on those CPUs and map VM as writeback, not allowing userspace to
+ * disable the quirk.
+ *
+ * On certain Intel CPUs (e.g. SPR, ICX), though self-snoop feature is
+ * supported, UC is slow enough to cause issues with some older guests (e.g.
+ * an old version of bochs driver uses ioremap() instead of ioremap_wc() to
+ * map the video RAM, causing wayland desktop to fail to get started
+ * correctly). To avoid breaking those older guests that rely on KVM to force
+ * memory type to WB, provide KVM_X86_QUIRK_IGNORE_GUEST_PAT to preserve the
+ * safer (for performance) default behavior.
+ *
+ * On top of this, non-coherent DMA devices need the guest to flush CPU
+ * caches properly. This also requires honoring guest PAT, and is forced
+ * independent of the quirk in vmx_ignore_guest_pat().
+ */
+ if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+ kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+ kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
return r;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d9035993ed3..8a52b7bb821a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4801,7 +4801,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
break;
case KVM_CAP_DISABLE_QUIRKS2:
- r = KVM_X86_VALID_QUIRKS;
+ r = kvm_caps.supported_quirks;
break;
case KVM_CAP_X86_NOTIFY_VMEXIT:
r = kvm_caps.has_notify_vmexit;
@@ -6534,11 +6534,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
switch (cap->cap) {
case KVM_CAP_DISABLE_QUIRKS2:
r = -EINVAL;
- if (cap->args[0] & ~KVM_X86_VALID_QUIRKS)
+ if (cap->args[0] & ~kvm_caps.supported_quirks)
break;
fallthrough;
case KVM_CAP_DISABLE_QUIRKS:
- kvm->arch.disabled_quirks = cap->args[0];
+ kvm->arch.disabled_quirks |= cap->args[0] & kvm_caps.supported_quirks;
r = 0;
break;
case KVM_CAP_SPLIT_IRQCHIP: {
@@ -9782,6 +9782,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
}
+ kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
+ kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
rdmsrl_safe(MSR_EFER, &kvm_host.efer);
@@ -9826,6 +9828,10 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (IS_ENABLED(CONFIG_KVM_SW_PROTECTED_VM) && tdp_mmu_enabled)
kvm_caps.supported_vm_types |= BIT(KVM_X86_SW_PROTECTED_VM);
+ /* KVM always ignores guest PAT for shadow paging. */
+ if (!tdp_enabled)
+ kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
kvm_caps.supported_xss = 0;
@@ -12383,7 +12389,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_async_pf_hash_reset(vcpu);
- vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_STUFF_FEATURE_MSRS)) {
+ vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
+ vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
+ vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
+ }
kvm_pmu_init(vcpu);
vcpu->arch.pending_external_vector = -1;
@@ -12397,8 +12407,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (r)
goto free_guest_fpu;
- vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
- vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_xen_init_vcpu(vcpu);
vcpu_load(vcpu);
kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
@@ -12778,6 +12786,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Decided by the vendor code for other VM types. */
kvm->arch.pre_fault_allowed =
type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
+ kvm->arch.disabled_quirks = kvm_caps.inapplicable_quirks & kvm_caps.supported_quirks;
ret = kvm_page_track_init(kvm);
if (ret)
@@ -13596,7 +13605,7 @@ static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
* (or last) non-coherent device is (un)registered to so that new SPTEs
* with the correct "ignore guest PAT" setting are created.
*/
- if (kvm_mmu_may_ignore_guest_pat())
+ if (kvm_mmu_may_ignore_guest_pat(kvm))
kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index ec623d23d13d..a1bd382232f4 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -32,6 +32,9 @@ struct kvm_caps {
u64 supported_xcr0;
u64 supported_xss;
u64 supported_perf_cap;
+
+ u64 supported_quirks;
+ u64 inapplicable_quirks;
};
struct kvm_host_values {
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5a5525d10a5e..3f7cb9d891aa 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -110,12 +110,6 @@ static struct cgroup_subsys_state *blkcg_css(void)
return task_css(current, io_cgrp_id);
}
-static bool blkcg_policy_enabled(struct request_queue *q,
- const struct blkcg_policy *pol)
-{
- return pol && test_bit(pol->plid, q->blkcg_pols);
-}
-
static void blkg_free_workfn(struct work_struct *work)
{
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index b9e3265c1eb3..112bf11d0fad 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -455,6 +455,12 @@ static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}
+static inline bool blkcg_policy_enabled(struct request_queue *q,
+ const struct blkcg_policy *pol)
+{
+ return pol && test_bit(pol->plid, q->blkcg_pols);
+}
+
void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
#else /* CONFIG_BLK_CGROUP */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 4aa66c07d2e8..bdf363868ac0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1209,17 +1209,13 @@ static int blk_throtl_init(struct gendisk *disk)
INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
throtl_service_queue_init(&td->service_queue);
- /*
- * Freeze queue before activating policy, to synchronize with IO path,
- * which is protected by 'q_usage_counter'.
- */
blk_mq_freeze_queue(disk->queue);
blk_mq_quiesce_queue(disk->queue);
q->td = td;
td->queue = q;
- /* activate policy */
+ /* activate policy, blk_throtl_activated() will return true */
ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
if (ret) {
q->td = NULL;
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 1a36d1278eea..e1b5343cd43f 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -154,7 +154,13 @@ void blk_throtl_cancel_bios(struct gendisk *disk);
static inline bool blk_throtl_activated(struct request_queue *q)
{
- return q->td != NULL;
+ /*
+ * q->td guarantees that the blk-throttle module is already loaded,
+ * and the plid of blk-throttle is assigned.
+ * blkcg_policy_enabled() guarantees that the policy is activated
+ * in the request_queue.
+ */
+ return q->td != NULL && blkcg_policy_enabled(q, &blkcg_policy_throtl);
}
static inline bool blk_should_throtl(struct bio *bio)
@@ -162,11 +168,6 @@ static inline bool blk_should_throtl(struct bio *bio)
struct throtl_grp *tg;
int rw = bio_data_dir(bio);
- /*
- * This is called under bio_queue_enter(), and it's synchronized with
- * the activation of blk-throtl, which is protected by
- * blk_mq_freeze_queue().
- */
if (!blk_throtl_activated(bio->bi_bdev->bd_queue))
return false;
@@ -192,7 +193,10 @@ static inline bool blk_should_throtl(struct bio *bio)
static inline bool blk_throtl_bio(struct bio *bio)
{
-
+ /*
+ * block throttling takes effect if the policy is activated
+ * in the bio's request_queue.
+ */
if (!blk_should_throtl(bio))
return false;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index d8674aee28c2..848a012cd19f 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -113,6 +113,10 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (ide_dev) {
errata.piix4.bmisx = pci_resource_start(ide_dev, 4);
+ if (errata.piix4.bmisx)
+ dev_dbg(&ide_dev->dev,
+ "Bus master activity detection (BM-IDE) erratum enabled\n");
+
pci_dev_put(ide_dev);
}
@@ -131,20 +135,17 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
if (isa_dev) {
pci_read_config_byte(isa_dev, 0x76, &value1);
pci_read_config_byte(isa_dev, 0x77, &value2);
- if ((value1 & 0x80) || (value2 & 0x80))
+ if ((value1 & 0x80) || (value2 & 0x80)) {
errata.piix4.fdma = 1;
+ dev_dbg(&isa_dev->dev,
+ "Type-F DMA livelock erratum (C3 disabled)\n");
+ }
pci_dev_put(isa_dev);
}
break;
}
- if (ide_dev)
- dev_dbg(&ide_dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
-
- if (isa_dev)
- dev_dbg(&isa_dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");
-
return 0;
}
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index f2c943b934be..9470f1830ff5 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -389,6 +389,19 @@ static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
},
},
+ /*
+ * The screen backlight turns off during udev device creation
+ * when returning true for _OSI("Windows 2009")
+ */
+ {
+ .callback = dmi_disable_osi_win7,
+ .ident = "Acer Aspire One D255",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOD255"),
+ },
+ },
+
/*
* The wireless hotkey does not work on those machines when
* returning true for _OSI("Windows 2012")
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 70af3fbbebe5..6537644faf38 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1649,7 +1649,7 @@ acpi_status __init acpi_os_initialize(void)
* Use acpi_os_map_generic_address to pre-map the reset
* register if it's in system memory.
*/
- void *rv;
+ void __iomem *rv;
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
pr_debug("%s: Reset register mapping %s\n", __func__,
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index c8ee8e42b0f6..0b7fa4a8c379 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -386,6 +386,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G70-35",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80Q5"),
+ },
+ },
/*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 237d8fd2a2cf..3e02402329a6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4101,7 +4101,11 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ |
ATA_QUIRK_FIRMWARE_WARN },
+ /* ADATA devices with LPM issues. */
+ { "ADATA SU680", NULL, ATA_QUIRK_NOLPM },
+
/* Seagate disks with LPM issues */
+ { "ST1000DM010-2EP102", NULL, ATA_QUIRK_NOLPM },
{ "ST2000DM008-2FR102", NULL, ATA_QUIRK_NOLPM },
/* drives which fail FPDMA_AA activation (some may freeze afterwards)
@@ -4144,6 +4148,7 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
/* Devices that do not need bridging limits applied */
{ "MTRON MSP-SATA*", NULL, ATA_QUIRK_BRIDGE_OK },
{ "BUFFALO HD-QSU2/R5", NULL, ATA_QUIRK_BRIDGE_OK },
+ { "QEMU HARDDISK", "2.5+", ATA_QUIRK_BRIDGE_OK },
/* Devices which aren't very happy with higher link speeds */
{ "WD My Book", NULL, ATA_QUIRK_1_5_GBPS },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d7c88a111ea3..b55443e31f40 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1920,17 +1920,19 @@ static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
unsigned int (*actor)(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 *rbuf))
{
- unsigned int rc;
unsigned long flags;
+ unsigned int len;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
- rc = actor(dev, cmd, ata_scsi_rbuf);
- if (rc == 0) {
+ len = actor(dev, cmd, ata_scsi_rbuf);
+ if (len) {
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
cmd->result = SAM_STAT_GOOD;
+ if (scsi_bufflen(cmd) > len)
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
}
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
@@ -2018,7 +2020,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
else
memcpy(rbuf + 58, versions, sizeof(versions));
- return 0;
+ /*
+ * Include all 8 possible version descriptors, even if not all of
+	 * them are populated.
+ */
+ return 96;
}
/**
@@ -2055,7 +2061,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
num_pages++;
}
rbuf[3] = num_pages; /* number of supported VPD pages */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2082,7 +2089,8 @@ static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
memcpy(rbuf, hdr, sizeof(hdr));
ata_id_string(dev->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2143,7 +2151,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
num += ATA_ID_WWN_LEN;
}
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2180,7 +2189,8 @@ static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
rbuf[56] = ATA_CMD_ID_ATA;
memcpy(&rbuf[60], &dev->id[0], 512);
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2231,7 +2241,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
put_unaligned_be32(1, &rbuf[28]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2261,7 +2271,7 @@ static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
if (zoned)
rbuf[8] = (zoned << 4);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2284,7 +2294,7 @@ static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
rbuf[3] = 0x4;
rbuf[5] = 1 << 6; /* TPWS */
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2304,7 +2314,7 @@ static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
{
if (!ata_dev_is_zac(dev)) {
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- return 1;
+ return 0;
}
/*
@@ -2322,7 +2332,7 @@ static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2346,7 +2356,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
if (!cpr_log) {
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- return 1;
+ return 0;
}
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
@@ -2360,7 +2370,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
@@ -2382,7 +2392,7 @@ static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
/* is CmdDt set? */
if (scsicmd[1] & 2) {
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- return 1;
+ return 0;
}
/* Is EVPD clear? */
@@ -2410,7 +2420,7 @@ static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
return ata_scsiop_inq_b9(dev, cmd, rbuf);
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- return 1;
+ return 0;
}
}
@@ -2741,24 +2751,27 @@ static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
- } else {
- put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
- rbuf[3] |= dpofua;
- if (ebd) {
- rbuf[7] = sizeof(sat_blk_desc);
- memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
- }
+
+ return rbuf[0] + 1;
}
- return 0;
+
+ put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
+ rbuf[3] |= dpofua;
+ if (ebd) {
+ rbuf[7] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+
+ return get_unaligned_be16(&rbuf[0]) + 2;
invalid_fld:
ata_scsi_set_invalid_field(dev, cmd, fp, bp);
- return 1;
+ return 0;
saving_not_supp:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
- return 1;
+ return 0;
}
/**
@@ -2801,7 +2814,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
rbuf[6] = sector_size >> (8 * 1);
rbuf[7] = sector_size;
- return 0;
+ return 8;
}
/*
@@ -2811,7 +2824,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
(scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- return 1;
+ return 0;
}
/* sector count, 64-bit */
@@ -2846,7 +2859,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
}
}
- return 0;
+ return 16;
}
/**
@@ -2865,7 +2878,7 @@ static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
{
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
- return 0;
+ return 16;
}
/*
@@ -3593,13 +3606,13 @@ static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- return 1;
+ return 0;
}
if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- return 1;
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
}
switch (cdb[3]) {
@@ -3672,7 +3685,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
rbuf[0] = rwcdlp;
rbuf[1] = cdlp | supported;
- return 0;
+ return 4;
}
/**
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 425c44f1e4d3..167ff6f7a3fe 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1856,6 +1856,7 @@ void pm_runtime_reinit(struct device *dev)
void pm_runtime_remove(struct device *dev)
{
__pm_runtime_disable(dev, false);
+ flush_work(&dev->power.work);
pm_runtime_reinit(dev);
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 837d77e3af2b..4217d00c76fd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -759,7 +759,18 @@ struct fwnode_handle *
fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
- return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
+ struct fwnode_handle *next;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return NULL;
+
+ /* Try to find a child in primary fwnode */
+ next = fwnode_call_ptr_op(fwnode, get_next_child_node, child);
+ if (next)
+ return next;
+
+ /* When no more children in primary, continue with secondary */
+ return fwnode_call_ptr_op(fwnode->secondary, get_next_child_node, child);
}
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
@@ -803,19 +814,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child)
{
- const struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct fwnode_handle *next;
-
- if (IS_ERR_OR_NULL(fwnode))
- return NULL;
-
- /* Try to find a child in primary fwnode */
- next = fwnode_get_next_child_node(fwnode, child);
- if (next)
- return next;
-
- /* When no more children in primary, continue with secondary */
- return fwnode_get_next_child_node(fwnode->secondary, child);
+ return fwnode_get_next_child_node(dev_fwnode(dev), child);
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index dd2c0485b984..372427747cd6 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -804,6 +804,8 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
*/
if (soc_type == QCA_WCN3988)
rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f);
+ else if (soc_type == QCA_WCN3998)
+ rom_ver = ((soc_ver & 0x0000f000) >> 0x07) | (soc_ver & 0x0000000f);
else
rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 1d7dd3d2c101..934c5087ec2b 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -178,11 +178,11 @@ static const struct of_device_id ax45mp_cache_ids[] = {
static int __init ax45mp_cache_init(void)
{
- struct device_node *np;
struct resource res;
int ret;
- np = of_find_matching_node(NULL, ax45mp_cache_ids);
+ struct device_node *np __free(device_node) =
+ of_find_matching_node(NULL, ax45mp_cache_ids);
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/drivers/cache/starfive_starlink_cache.c b/drivers/cache/starfive_starlink_cache.c
index 24c7d078ca22..3a25d2d7c70c 100644
--- a/drivers/cache/starfive_starlink_cache.c
+++ b/drivers/cache/starfive_starlink_cache.c
@@ -102,11 +102,11 @@ static const struct of_device_id starlink_cache_ids[] = {
static int __init starlink_cache_init(void)
{
- struct device_node *np;
u32 block_size;
int ret;
- np = of_find_matching_node(NULL, starlink_cache_ids);
+ struct device_node *np __free(device_node) =
+ of_find_matching_node(NULL, starlink_cache_ids);
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2cb11e5a1125..0e1bbc966135 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -353,16 +353,6 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
- /*
- * If there is only a single idle state (or none), there is nothing
- * meaningful for the governor to choose. Skip the governor and
- * always use state 0 with the tick running.
- */
- if (drv->state_count <= 1) {
- *stop_tick = false;
- return 0;
- }
-
return cpuidle_curr_governor->select(drv, dev, stop_tick);
}
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index b9658e38cb03..63a3f5042a48 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -52,9 +52,10 @@ static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data,
rng->priv = 0;
} else {
work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC);
- if (!work_data)
+ if (!work_data) {
+ atomic_dec(&i2c_priv->tfm_count);
return -ENOMEM;
-
+ }
work_data->ctx = i2c_priv;
work_data->client = i2c_priv->client;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 136fcaeff8dd..852e6714d9f2 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -763,6 +763,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
{
struct mmp_pdma_desc_sw *sw;
u32 curr, residue = 0;
+ unsigned long flags;
bool passed = false;
bool cyclic = chan->cyclic_first != NULL;
@@ -778,6 +779,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
else
curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
list_for_each_entry(sw, &chan->chain_running, node) {
u32 start, end, len;
@@ -821,6 +824,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
continue;
if (sw->async_tx.cookie == cookie) {
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
return residue;
} else {
residue = 0;
@@ -828,6 +832,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
}
}
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
/* We should only get here in case of cyclic transactions */
return residue;
}
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1bf0e15c1540..423ead5fa9c1 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -257,9 +257,10 @@ static void fwnet_header_cache_update(struct hh_cache *hh,
memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}
-static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+static int fwnet_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr)
{
- memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
+ memcpy(haddr, dev->dev_addr, FWNET_ALEN);
return FWNET_ALEN;
}
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 9516ee870cd2..bec1fbaff7f3 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -206,12 +206,12 @@ static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
return 0;
}
-static int ffa_rxtx_unmap(u16 vm_id)
+static int ffa_rxtx_unmap(void)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
- .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
+ .a0 = FFA_RXTX_UNMAP,
}, &ret);
if (ret.a0 == FFA_ERROR)
@@ -1832,7 +1832,7 @@ static int __init ffa_init(void)
cleanup_notifs:
ffa_notifications_cleanup();
- ffa_rxtx_unmap(drv_info->vm_id);
+ ffa_rxtx_unmap();
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
@@ -1847,7 +1847,7 @@ static void __exit ffa_exit(void)
{
ffa_notifications_cleanup();
ffa_partitions_cleanup();
- ffa_rxtx_unmap(drv_info->vm_id);
+ ffa_rxtx_unmap();
free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
kfree(drv_info);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index f4d47577f83e..2d33771917bb 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -18,6 +18,7 @@
#include <linux/bitmap.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -940,13 +941,13 @@ static int scpi_probe(struct platform_device *pdev)
int idx = scpi_drvinfo->num_chans;
struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
- struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
+ struct device_node *shmem __free(device_node) =
+ of_parse_phandle(np, "shmem", idx);
if (!of_match_node(shmem_of_match, shmem))
return -ENXIO;
ret = of_address_to_resource(shmem, 0, &res);
- of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fff89288e2f4..667ab2bfc8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1403,7 +1403,10 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
*process_info = info;
}
- vm->process_info = *process_info;
+ if (cmpxchg(&vm->process_info, NULL, *process_info) != NULL) {
+ ret = -EINVAL;
+ goto already_acquired;
+ }
/* Validate page directory and attach eviction fence */
ret = amdgpu_bo_reserve(vm->root.bo, true);
@@ -1443,6 +1446,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
vm->process_info = NULL;
+already_acquired:
if (info) {
dma_fence_put(&info->eviction_fence->base);
*process_info = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 361184355e23..d5e6d5ec69c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2570,8 +2570,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
default:
r = amdgpu_discovery_set_ip_blocks(adev);
- if (r)
+ if (r) {
+ adev->num_ip_blocks = 0;
return r;
+ }
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 78e36d889d0b..c626f66ded18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -82,7 +82,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev == NULL)
+ if (adev == NULL || !adev->num_ip_blocks)
return;
amdgpu_unregister_gpu_instance(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index f28f6b4ba765..27018c0369c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -1362,15 +1362,31 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
__decode_table_header_from_buf(hdr, buf);
- if (hdr->version == RAS_TABLE_VER_V2_1) {
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
control->ras_record_offset = RAS_RECORD_START_V2_1;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
- } else {
+ break;
+ case RAS_TABLE_VER_V1:
control->ras_num_recs = RAS_NUM_RECS(hdr);
control->ras_record_offset = RAS_RECORD_START;
control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
}
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
if (hdr->header == RAS_TABLE_HDR_VAL) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 78c527b56f7c..91c6464efed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -688,28 +688,35 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
} else {
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(9, 0, 0):
- mmhub_cid = mmhub_client_ids_vega10[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega10) ?
+ mmhub_client_ids_vega10[cid][rw] : NULL;
break;
case IP_VERSION(9, 3, 0):
- mmhub_cid = mmhub_client_ids_vega12[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega12) ?
+ mmhub_client_ids_vega12[cid][rw] : NULL;
break;
case IP_VERSION(9, 4, 0):
- mmhub_cid = mmhub_client_ids_vega20[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega20) ?
+ mmhub_client_ids_vega20[cid][rw] : NULL;
break;
case IP_VERSION(9, 4, 1):
- mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_arcturus) ?
+ mmhub_client_ids_arcturus[cid][rw] : NULL;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 0):
- mmhub_cid = mmhub_client_ids_raven[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_raven) ?
+ mmhub_client_ids_raven[cid][rw] : NULL;
break;
case IP_VERSION(1, 5, 0):
case IP_VERSION(2, 4, 0):
- mmhub_cid = mmhub_client_ids_renoir[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_renoir) ?
+ mmhub_client_ids_renoir[cid][rw] : NULL;
break;
case IP_VERSION(1, 8, 0):
case IP_VERSION(9, 4, 2):
- mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_aldebaran) ?
+ mmhub_client_ids_aldebaran[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 25b175f23312..ccd9055360fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -677,11 +677,6 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
- if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
- mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
- else
- dev_info_once(mes->adev->dev,
- "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 19cbf80fa321..333a31da8556 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -615,11 +615,6 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
- if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
- mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
- else
- dev_info_once(adev->dev,
- "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index a0cc8e218ca1..534cb4c544dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -154,14 +154,17 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 2):
- mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_navi1x) ?
+ mmhub_client_ids_navi1x[cid][rw] : NULL;
break;
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
- mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_sienna_cichlid) ?
+ mmhub_client_ids_sienna_cichlid[cid][rw] : NULL;
break;
case IP_VERSION(2, 1, 2):
- mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_beige_goby) ?
+ mmhub_client_ids_beige_goby[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 5eb8122e2746..ceb2f6b46de5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -94,7 +94,8 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
case IP_VERSION(2, 3, 0):
case IP_VERSION(2, 4, 0):
case IP_VERSION(2, 4, 1):
- mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vangogh) ?
+ mmhub_client_ids_vangogh[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 7d5242df58a5..ab966e69a342 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -110,7 +110,8 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 1):
- mmhub_cid = mmhub_client_ids_v3_0_0[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_0) ?
+ mmhub_client_ids_v3_0_0[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 910337dc28d1..14a742d3a99d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -117,7 +117,8 @@ mmhub_v3_0_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 0, 1):
- mmhub_cid = mmhub_client_ids_v3_0_1[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_1) ?
+ mmhub_client_ids_v3_0_1[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
index f0f182f033b9..e1f07f2a1852 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
@@ -108,7 +108,8 @@ mmhub_v3_0_2_print_l2_protection_fault_status(struct amdgpu_device *adev,
"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- mmhub_cid = mmhub_client_ids_v3_0_2[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_2) ?
+ mmhub_client_ids_v3_0_2[cid][rw] : NULL;
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index 951998454b25..88bfe321f83a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -102,7 +102,8 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
status);
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
- mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v4_1_0) ?
+ mmhub_client_ids_v4_1_0[cid][rw] : NULL;
break;
default:
mmhub_cid = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index a359d612182d..3aa715830fbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -166,6 +166,10 @@ static int vcn_v5_0_0_sw_init(void *handle)
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 4078a8176187..e3749dae5e59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -600,6 +600,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
p->queue_size)) {
pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
p->queue_address, p->queue_size);
+ amdgpu_bo_unreserve(vm->root.bo);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4d508129a5e6..e092d2372a4e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -12318,7 +12318,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector,
u16 min_vfreq;
u16 max_vfreq;
- if (edid == NULL || edid->extensions == 0)
+ if (!edid || !edid->extensions)
return;
/* Find DisplayID extension */
@@ -12328,7 +12328,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector,
break;
}
- if (edid_ext == NULL)
+ if (i == edid->extensions)
return;
while (j < EDID_LENGTH) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..016230896d0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -97,6 +97,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a0c1072c59a2..7de77358e1c0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -255,6 +255,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
+ if (ctx->dce_version == DCN_VERSION_2_01) {
+ dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
+ }
if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
@@ -267,10 +271,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
- if (ctx->dce_version == DCN_VERSION_2_01) {
- dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base;
- }
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 13cf415e38e5..d50b9440210e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -164,7 +164,7 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
+ cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 85fb028b5d43..0a0e48507895 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -167,7 +167,7 @@ struct dc_stream_state *dc_create_stream_for_sink(
if (sink == NULL)
return NULL;
- stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ stream = kzalloc(sizeof(struct dc_stream_state), GFP_ATOMIC);
if (stream == NULL)
goto alloc_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index facf269c4326..b4eefe3ce7c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
uint32_t src_height;
uint32_t src_width;
uint32_t v_refresh;
- uint32_t sym_clock; /* HDMI only */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
struct dc_link_settings link_settings; /* DP only */
};
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index e62d415b1600..768b756d427c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -65,7 +65,11 @@ static void dcn401_initialize_min_clocks(struct dc *dc)
* audio corruption. Read current DISPCLK from DENTIST and request the same
* freq to ensure that the timing is valid and unchanged.
*/
- clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
+ if (dc->clk_mgr->funcs->get_dispclk_from_dentist) {
+ clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
+ } else {
+ clocks->dispclk_khz = dc->clk_mgr->boot_snapshot.dispclk * 1000;
+ }
}
clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
clocks->fclk_p_state_change_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 62dbb0f3073c..ccfdca7c83f6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1784,7 +1784,10 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_START();
dcn32_override_min_req_memclk(dc, context);
+ DC_FP_END();
+
dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index acd1cef61b7c..349544504c93 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -65,6 +65,7 @@ struct single_display_configuration {
uint32_t view_resolution_cy;
enum amd_pp_display_config_type displayconfigtype;
uint32_t vertical_refresh; /* for active display */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
};
#define MAX_NUM_DISPLAY 32
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
index 2d2d2d5e6763..9ef965e4a92e 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -100,3 +100,70 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
return vrefresh;
}
+
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ struct single_display_configuration *display_cfg;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_connector *conn;
+ int num_crtcs = 0;
+ int vrefresh;
+ u32 vblank_in_pixels, vblank_time_us;
+
+ cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ /* The array should only contain active displays. */
+ if (!amdgpu_crtc->enabled)
+ continue;
+
+ conn = to_amdgpu_connector(amdgpu_crtc->connector);
+ display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++];
+
+ if (amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us =
+ vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+
+ /* The legacy (non-DC) code has issues with mclk switching
+ * with refresh rates over 120 Hz. Disable mclk switching.
+ */
+ if (vrefresh > 120)
+ vblank_time_us = 0;
+
+ /* Find minimum vblank time. */
+ if (vblank_time_us < cfg->min_vblank_time)
+ cfg->min_vblank_time = vblank_time_us;
+
+ /* Find vertical refresh rate of first active display. */
+ if (!cfg->vrefresh)
+ cfg->vrefresh = vrefresh;
+ }
+
+ if (amdgpu_crtc->crtc_id < cfg->crtc_index) {
+ /* Find first active CRTC and its line time. */
+ cfg->crtc_index = amdgpu_crtc->crtc_id;
+ cfg->line_time_in_us = amdgpu_crtc->line_time;
+ }
+
+ display_cfg->controller_id = amdgpu_crtc->crtc_id;
+ display_cfg->pixel_clock = conn->pixelclock_for_modeset;
+ }
+ }
+
+ cfg->display_clk = adev->clock.default_dispclk;
+ cfg->num_display = num_crtcs;
+}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
index 5c2a89f0d5d5..8be11510cd92 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -29,4 +29,6 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 6b34a33d788f..8cf7e517da84 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) ||
pi->disable_nb_ps3_in_battery;
ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpm0_pg_nb_ps_hi = 0x2;
@@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
return 0;
force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+ (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start);
if (force_high) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index c7518b13e787..8eb121db2ce4 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -797,8 +797,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
int i;
struct amdgpu_ps *ps;
u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = adev->pm.pm_display_cfg.num_display < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
@@ -994,7 +993,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_get_active_displays(adev);
+ if (!adev->dc_enabled)
+ amdgpu_dpm_get_display_cfg(adev);
amdgpu_dpm_change_power_state_locked(adev);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 37a91a76208e..26defd72a36c 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3058,7 +3058,7 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
static bool si_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
@@ -3424,9 +3424,10 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
+ const struct amd_pp_display_configuration *display_cfg =
+ &adev->pm.pm_display_cfg;
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
- struct amdgpu_connector *conn;
bool disable_mclk_switching = false;
bool disable_sclk_switching = false;
u32 mclk, sclk;
@@ -3439,9 +3440,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0xC3) ||
+ (adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6664) ||
(adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667)) {
+ (adev->pdev->device == 0x6667) ||
+ (adev->pdev->device == 0x666F)) {
max_sclk = 75000;
}
if ((adev->pdev->revision == 0xC3) ||
@@ -3475,14 +3478,9 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
* For example, 4K 60Hz and 1080p 144Hz fall into this category.
* Find number of such displays connected.
*/
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) ||
- !adev->mode_info.crtcs[i]->enabled)
- continue;
-
- conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector);
-
- if (conn->pixelclock_for_modeset > 297000)
+ for (i = 0; i < display_cfg->num_display; i++) {
+ /* The array only contains active displays. */
+ if (display_cfg->displays[i].pixel_clock > 297000)
high_pixelclock_count++;
}
@@ -3515,7 +3513,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
rps->ecclk = 0;
}
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ if ((adev->pm.pm_display_cfg.num_display > 1) ||
si_dpm_vblank_too_short(adev))
disable_mclk_switching = true;
@@ -3663,7 +3661,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->performance_levels[i].mclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
+ display_cfg->display_clk,
max_limits->vddc, &ps->performance_levels[i].vddc);
}
@@ -4188,16 +4186,16 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
static void si_program_display_gap(struct amdgpu_device *adev)
{
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
u32 tmp, pipe;
- int i;
tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
+ if (cfg->num_display > 0)
tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
else
tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
- if (adev->pm.dpm.new_active_crtc_count > 1)
+ if (cfg->num_display > 1)
tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
else
tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
@@ -4207,17 +4205,8 @@ static void si_program_display_gap(struct amdgpu_device *adev)
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
+ if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
+ pipe = cfg->crtc_index;
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
@@ -4228,7 +4217,7 @@ static void si_program_display_gap(struct amdgpu_device *adev)
* This can be a problem on PowerXpress systems or if you want to use the card
* for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+ si_notify_smc_display_change(adev, cfg->num_display > 0);
}
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
@@ -5532,7 +5521,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
(RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ (adev->pm.pm_display_cfg.num_display <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5681,7 +5670,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
/* XXX validate against display requirements! */
for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
+ if (adev->pm.pm_display_cfg.display_clk <=
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
if (ulv->pl.vddc <
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
@@ -5835,30 +5824,22 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
static int si_upload_smc_data(struct amdgpu_device *adev)
{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
u32 crtc_index = 0;
u32 mclk_change_block_cp_min = 0;
u32 mclk_change_block_cp_max = 0;
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
- }
- }
-
/* When a display is plugged in, program these so that the SMC
* performs MCLK switching when it doesn't cause flickering.
* When no display is plugged in, there is no need to restrict
* MCLK switching, so program them to zero.
*/
- if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) {
- crtc_index = amdgpu_crtc->crtc_id;
+ if (cfg->num_display) {
+ crtc_index = cfg->crtc_index;
- if (amdgpu_crtc->line_time) {
- mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time;
- mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time;
+ if (cfg->line_time_in_us) {
+ mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
+ mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
}
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 0115d26b5af9..24b25cddf0c1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1569,16 +1569,7 @@ static void pp_pm_compute_clocks(void *handle)
struct amdgpu_device *adev = hwmgr->adev;
if (!adev->dc_enabled) {
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
-
+ amdgpu_dpm_get_display_cfg(adev);
pp_display_configuration_change(handle,
&adev->pm.pm_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index d83f04b28253..cbd3def52f4b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2033,6 +2033,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetricsExternal_t metrics_ext;
SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
+ uint32_t mp1_ver = amdgpu_ip_version(smu->adev, MP1_HWIP, 0);
int ret = 0;
ret = smu_cmn_get_metrics_table(smu,
@@ -2057,7 +2058,12 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
- gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
+
+ if ((mp1_ver == IP_VERSION(13, 0, 0) && smu->smc_fw_version <= 0x004e1e00) ||
+ (mp1_ver == IP_VERSION(13, 0, 10) && smu->smc_fw_version <= 0x00500800))
+ gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
+ else
+ gpu_metrics->energy_accumulator = UINT_MAX;
if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 7c753d795287..f14ad6290301 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2043,7 +2043,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
- gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
+ gpu_metrics->energy_accumulator = smu->smc_fw_version <= 0x00521400 ?
+ metrics->EnergyAccumulator : UINT_MAX;
if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 3bab8269a46a..d061467eba2e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2394,7 +2394,8 @@ static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 430f8adebf9c..f3f836da6c2a 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1697,6 +1697,14 @@ static int samsung_dsim_register_te_irq(struct samsung_dsim *dsi, struct device
return 0;
}
+static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
+{
+ if (dsi->te_gpio) {
+ free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+ gpiod_put(dsi->te_gpio);
+ }
+}
+
static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
@@ -1771,13 +1779,13 @@ static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
ret = samsung_dsim_register_te_irq(dsi, &device->dev);
if (ret)
- return ret;
+ goto err_remove_bridge;
}
if (pdata->host_ops && pdata->host_ops->attach) {
ret = pdata->host_ops->attach(dsi, device);
if (ret)
- return ret;
+ goto err_unregister_te_irq;
}
dsi->lanes = device->lanes;
@@ -1785,14 +1793,13 @@ static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
dsi->mode_flags = device->mode_flags;
return 0;
-}
-static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
-{
- if (dsi->te_gpio) {
- free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
- gpiod_put(dsi->te_gpio);
- }
+err_unregister_te_irq:
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO))
+ samsung_dsim_unregister_te_irq(dsi);
+err_remove_bridge:
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
}
static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 57a7ed13f996..9eb57d3098ac 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -303,9 +303,9 @@ static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx,
* DSI_CLK = mode clock * bpp / dsi_data_lanes / 2
* the 2 is there because the bus is DDR.
*/
- return DIV_ROUND_UP(clamp((unsigned int)mode->clock *
- mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
- ctx->dsi->lanes / 2, 40000U, 500000U), 5000U);
+ return clamp((unsigned int)mode->clock *
+ mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
+ ctx->dsi->lanes / 2, 40000U, 500000U) / 5000U;
}
static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
@@ -325,6 +325,7 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ const unsigned int dual_factor = ctx->lvds_dual_link ? 2 : 1;
struct drm_atomic_state *state = old_bridge_state->base.state;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
@@ -452,18 +453,18 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
/* 32 + 1 pixel clock to ensure proper operation */
le16val = cpu_to_le16(32 + 1);
regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &le16val, 2);
- le16val = cpu_to_le16(mode->hsync_end - mode->hsync_start);
+ le16val = cpu_to_le16((mode->hsync_end - mode->hsync_start) / dual_factor);
regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
&le16val, 2);
le16val = cpu_to_le16(mode->vsync_end - mode->vsync_start);
regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
&le16val, 2);
regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH,
- mode->htotal - mode->hsync_end);
+ (mode->htotal - mode->hsync_end) / dual_factor);
regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH,
mode->vtotal - mode->vsync_end);
regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
- mode->hsync_start - mode->hdisplay);
+ (mode->hsync_start - mode->hdisplay) / dual_factor);
regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH,
mode->vsync_start - mode->vdisplay);
regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9859ec688e56..a3d392cc5f2e 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -106,10 +106,21 @@
#define SN_PWM_EN_INV_REG 0xA5
#define SN_PWM_INV_MASK BIT(0)
#define SN_PWM_EN_MASK BIT(1)
+
+#define SN_IRQ_EN_REG 0xE0
+#define IRQ_EN BIT(0)
+
+#define SN_IRQ_EVENTS_EN_REG 0xE6
+#define HPD_INSERTION_EN BIT(1)
+#define HPD_REMOVAL_EN BIT(2)
+
#define SN_AUX_CMD_STATUS_REG 0xF4
#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
#define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6)
+#define SN_IRQ_STATUS_REG 0xF5
+#define HPD_REMOVAL_STATUS BIT(2)
+#define HPD_INSERTION_STATUS BIT(1)
#define MIN_DSI_CLK_FREQ_MHZ 40
@@ -152,7 +163,9 @@
* @ln_assign: Value to program to the LN_ASSIGN register.
* @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
* @comms_enabled: If true then communication over the aux channel is enabled.
+ * @hpd_enabled: If true then HPD events are enabled.
* @comms_mutex: Protects modification of comms_enabled.
+ * @hpd_mutex: Protects modification of hpd_enabled.
*
* @gchip: If we expose our GPIOs, this is used.
* @gchip_output: A cache of whether we've set GPIOs to output. This
@@ -190,7 +203,9 @@ struct ti_sn65dsi86 {
u8 ln_assign;
u8 ln_polrs;
bool comms_enabled;
+ bool hpd_enabled;
struct mutex comms_mutex;
+ struct mutex hpd_mutex;
#if defined(CONFIG_OF_GPIO)
struct gpio_chip gchip;
@@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
.max_register = 0xFF,
};
+static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg,
+ u8 *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+	ret = regmap_read(pdata->regmap, reg, &reg_val);
+ if (ret) {
+ dev_err(pdata->dev, "fail to read raw reg %#x: %d\n",
+ reg, ret);
+ return ret;
+ }
+ *val = (u8)reg_val;
+
+ return 0;
+}
+
static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
unsigned int reg, u16 *val)
{
@@ -362,6 +394,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
int ret;
ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
@@ -396,6 +429,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
if (pdata->refclk)
ti_sn65dsi86_enable_comms(pdata);
+ if (client->irq) {
+ ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN,
+ IRQ_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret);
+ }
+
return ret;
}
@@ -1223,6 +1263,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
/*
* Device needs to be powered on before reading the HPD state
@@ -1231,11 +1273,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
*/
pm_runtime_get_sync(pdata->dev);
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = true;
+ mutex_unlock(&pdata->hpd_mutex);
+
+ if (client->irq) {
+ ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret);
+ }
}
static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
+
+ if (client->irq) {
+ ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret);
+ }
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = false;
+ mutex_unlock(&pdata->hpd_mutex);
pm_runtime_put_autosuspend(pdata->dev);
}
@@ -1321,11 +1387,47 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
return 0;
}
+static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private)
+{
+ struct ti_sn65dsi86 *pdata = private;
+ struct drm_device *dev = pdata->bridge.dev;
+ u8 status;
+ int ret;
+ bool hpd_event;
+
+ ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS);
+
+ dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status);
+ if (!status)
+ return IRQ_NONE;
+
+ ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Only send the HPD event if we are bound with a device. */
+ mutex_lock(&pdata->hpd_mutex);
+ if (pdata->hpd_enabled && hpd_event)
+ drm_kms_helper_hotplug_event(dev);
+ mutex_unlock(&pdata->hpd_mutex);
+
+ return IRQ_HANDLED;
+}
+
static int ti_sn_bridge_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
struct device_node *np = pdata->dev->of_node;
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
int ret;
pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
@@ -1345,8 +1447,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
- pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
- DRM_BRIDGE_OP_HPD;
+ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
+ if (client->irq)
+ pdata->bridge.ops |= DRM_BRIDGE_OP_HPD;
/*
* If comms were already enabled they would have been enabled
* with the wrong value of HPD_DISABLE. Update it now. Comms
@@ -1952,6 +2055,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
+ mutex_init(&pdata->hpd_mutex);
mutex_init(&pdata->comms_mutex);
pdata->regmap = devm_regmap_init_i2c(client,
@@ -1982,6 +2086,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (ret)
return ret;
+ if (client->irq) {
+ ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL,
+ ti_sn_bridge_interrupt,
+ IRQF_ONESHOT,
+ dev_name(pdata->dev), pdata);
+
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request interrupt\n");
+ }
+
/*
* Break ourselves up into a collection of aux devices. The only real
* motiviation here is to solve the chicken-and-egg problem of probe
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index e2cf118ff01d..fe97a5fd4b9c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -223,6 +223,7 @@ static void drm_events_release(struct drm_file *file_priv)
void drm_file_free(struct drm_file *file)
{
struct drm_device *dev;
+ int idx;
if (!file)
return;
@@ -236,9 +237,11 @@ void drm_file_free(struct drm_file *file)
drm_events_release(file);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+ drm_dev_enter(dev, &idx)) {
drm_fb_release(file);
drm_property_destroy_user_blobs(dev, file);
+ drm_dev_exit(idx);
}
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 37d2e0a4ef4b..502d88ca9ffa 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -553,10 +553,13 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
+ if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) {
+ struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]");
- drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
- drm_framebuffer_print_info(&p, 1, fb);
+ drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+ drm_framebuffer_print_info(&p, 1, fb);
+ }
+ list_del_init(&fb->filp_head);
drm_framebuffer_free(&fb->base.refcount);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 9812191e7ef2..2039c17a9ee7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1218,6 +1218,7 @@ struct intel_crtc_state {
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
+ u8 entry_setup_frames;
/*
* Frequence the dpll for the port should run at. Differs from the
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 34d61e44c6bd..e127ce172709 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -41,6 +41,7 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
+#include "intel_vdsc.h"
#include "skl_universal_plane.h"
/**
@@ -1570,7 +1571,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
if (entry_setup_frames >= 0) {
- intel_dp->psr.entry_setup_frames = entry_setup_frames;
+ crtc_state->entry_setup_frames = entry_setup_frames;
} else {
drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
@@ -1978,6 +1979,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
+ intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2114,12 +2116,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Panel Replay on eDP is always using ALPM aux less. */
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
- ALPM_CTL_ALPM_ENABLE |
- ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
-
- intel_de_rmw(display,
- PORT_ALPM_CTL(display, cpu_transcoder),
- PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+ ALPM_CTL_ALPM_ENABLE, 0);
}
/* Disable PSR on Sink */
@@ -2317,6 +2314,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_dsc_su_et_parameters_configure(NULL, encoder, crtc_state,
+ drm_rect_height(&crtc_state->psr2_su_area));
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2390,12 +2393,13 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
-static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
+static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
+ bool su_area_changed = false;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
@@ -2404,10 +2408,18 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
else
y_alignment = crtc_state->su_y_granularity;
- crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
- if (crtc_state->psr2_su_area.y2 % y_alignment)
+ if (crtc_state->psr2_su_area.y1 % y_alignment) {
+ crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
+ su_area_changed = true;
+ }
+
+ if (crtc_state->psr2_su_area.y2 % y_alignment) {
crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
y_alignment) + 1) * y_alignment;
+ su_area_changed = true;
+ }
+
+ return su_area_changed;
}
/*
@@ -2492,7 +2504,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false, cursor_in_su_area = false;
+ bool full_update = false, su_area_changed;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2604,15 +2616,32 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /*
- * Adjust su area to cover cursor fully as necessary (early
- * transport). This needs to be done after
- * drm_atomic_add_affected_planes to ensure visible cursor is added into
- * affected planes even when cursor is not updated by itself.
- */
- intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+ do {
+ bool cursor_in_su_area;
- intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+ /*
+ * Adjust su area to cover cursor fully as necessary
+ * (early transport). This needs to be done after
+ * drm_atomic_add_affected_planes to ensure visible
+ * cursor is added into affected planes even when
+ * cursor is not updated by itself.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+
+ su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+
+ /*
+ * If the cursor was outside the SU area before
+ * alignment, the alignment step (which only expands
+ * SU) may pull the cursor partially inside, so we
+ * must run ET alignment again to fully cover it. But
+ * if the cursor was already fully inside before
+ * alignment, expanding the SU area won't change that,
+ * so no further work is needed.
+ */
+ if (cursor_in_su_area)
+ break;
+ } while (su_area_changed);
/*
* Now that we have the pipe damaged area check if it intersect with
@@ -2672,6 +2701,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
+ if (full_update)
+ clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src,
+ &crtc_state->pipe_src);
+
psr2_man_trk_ctl_calc(crtc_state, full_update);
crtc_state->pipe_srcsz_early_tpt =
psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 2e849b015e74..0c0847b0732f 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -723,6 +723,29 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
sizeof(dp_dsc_pps_sdp));
}
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum pipe pipe = crtc->pipe;
+ int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
+ int slice_row_per_frame = su_lines / vdsc_cfg->slice_height;
+ u32 val;
+
+ drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height);
+ drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2);
+
+ val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame);
+ val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines);
+
+ intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val);
+
+ if (vdsc_instances_per_pipe == 2)
+ intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val);
+}
+
static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
return is_pipe_dsc(crtc, cpu_transcoder) ?
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 290b2e9b3482..7f1b318a7255 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -13,6 +13,7 @@ struct drm_printer;
enum transcoder;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dsb;
struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
@@ -29,6 +30,8 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index f921ad67b587..524a8124237c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -186,6 +186,18 @@
#define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset)
#define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset)
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364
+#define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB)
+#define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB)
+
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20)
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows))
+#define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h))
+
/* Icelake Rate Control Buffer Threshold Registers */
#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index ae3343c81a64..be6dfa0c88c2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -151,8 +151,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
}
} while (1);
- nr_pages = min_t(unsigned long,
- folio_nr_pages(folio), page_count - i);
+ nr_pages = min_array(((unsigned long[]) {
+ folio_nr_pages(folio),
+ page_count - i,
+ max_segment / PAGE_SIZE,
+ }), 3);
+
if (!i ||
sg->length >= max_segment ||
folio_pfn(folio) != next_pfn) {
@@ -162,7 +166,9 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
st->nents++;
sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
} else {
- /* XXX: could overflow? */
+ nr_pages = min_t(unsigned long, nr_pages,
+ (max_segment - sg->length) / PAGE_SIZE);
+
sg->length += nr_pages * PAGE_SIZE;
}
next_pfn = folio_pfn(folio) + nr_pages;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4d30a86016f2..d84b6c2af860 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1970,7 +1970,8 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
if (engine->sanitize)
engine->sanitize(engine);
- engine->set_default_submission(engine);
+ if (engine->set_default_submission)
+ engine->set_default_submission(engine);
}
}
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index d97613c6a0a9..bf4cf8426f91 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -408,7 +408,16 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset)
}
/* Disable IRQs for the duration of the reset. */
- disable_irq(pvr_dev->irq);
+ if (hard_reset) {
+ disable_irq(pvr_dev->irq);
+ } else {
+ /*
+ * Soft reset is triggered as a response to a FW command to the Host and is
+ * processed from the threaded IRQ handler. This code cannot (nor needs to)
+ * wait for any IRQ processing to complete.
+ */
+ disable_irq_nosync(pvr_dev->irq);
+ }
do {
if (hard_reset) {
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 39641551eeb6..b2e91e06dfd9 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -79,7 +79,7 @@ static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
- dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base,
DMA_ATTR_FORCE_CONTIGUOUS);
kfree(gpummu);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index d22e01751f5e..0c360e790329 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -534,13 +534,30 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
* FIXME: Reconsider this if/when CMD mode handling is rewritten to use
* transfer time and data overhead as a starting point of the calculations.
*/
-static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
- const struct drm_dsc_config *dsc)
+static unsigned long
+dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
+ const struct drm_dsc_config *dsc,
+ bool is_bonded_dsi)
{
- int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc),
- dsc->bits_per_component * 3);
+ int hdisplay, new_hdisplay, new_htotal;
- int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
+ /*
+ * For bonded DSI, split hdisplay across two links and round up each
+ * half separately, passing the full hdisplay would only round up once.
+ * This also aligns with the hdisplay we program later in
+ * dsi_timing_setup()
+ */
+ hdisplay = mode->hdisplay;
+ if (is_bonded_dsi)
+ hdisplay /= 2;
+
+ new_hdisplay = DIV_ROUND_UP(hdisplay * drm_dsc_get_bpp_int(dsc),
+ dsc->bits_per_component * 3);
+
+ if (is_bonded_dsi)
+ new_hdisplay *= 2;
+
+ new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
}
@@ -553,7 +570,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
pclk_rate = mode->clock * 1000u;
if (dsc)
- pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
+ pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc, is_bonded_dsi);
/*
* For bonded DSI mode, the current DRM mode has the complete width of the
@@ -944,7 +961,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->dsc) {
struct drm_dsc_config *dsc = msm_host->dsc;
- u32 bytes_per_pclk;
+ u32 bits_per_pclk;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@@ -966,7 +983,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
/*
* DPU sends 3 bytes per pclk cycle to DSI. If widebus is
- * enabled, bus width is extended to 6 bytes.
+ * enabled, MDP always sends out 48-bit compressed data per
+ * pclk and on average, DSI consumes an amount of compressed
+ * data equivalent to the uncompressed pixel depth per pclk.
*
* Calculate the number of pclks needed to transmit one line of
* the compressed data.
@@ -978,12 +997,12 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
* unused anyway.
*/
h_total -= hdisplay;
- if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
- bytes_per_pclk = 6;
+ if (wide_bus_enabled)
+ bits_per_pclk = mipi_dsi_pixel_format_to_bpp(msm_host->format);
else
- bytes_per_pclk = 3;
+ bits_per_pclk = 24;
- hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk);
+ hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc) * 8, bits_per_pclk);
h_total += hdisplay;
ha_end = ha_start + hdisplay;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 5ab4201c981e..2764de3495e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1229,6 +1229,9 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
u8 size = msg->size;
int ret;
+ if (pm_runtime_suspended(nv_connector->base.dev->dev))
+ return -EBUSY;
+
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
if (!nv_encoder)
return -ENODEV;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index f12227145ef0..0342d095d44c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2915,9 +2915,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6660) ||
(rdev->pdev->device == 0x6664) ||
(rdev->pdev->device == 0x6665) ||
- (rdev->pdev->device == 0x6667)) {
+ (rdev->pdev->device == 0x6667) ||
+ (rdev->pdev->device == 0x666F)) {
max_sclk = 75000;
}
if ((rdev->pdev->revision == 0xC3) ||
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index b9c6ed352182..f8b1eaffb5e8 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -345,6 +345,12 @@ static int st7586_probe(struct spi_device *spi)
if (ret)
return ret;
+ /*
+ * Override value set by mipi_dbi_spi_init(). This driver is a bit
+ * non-standard, so best to set it explicitly here.
+ */
+ dbi->write_memory_bpw = 8;
+
/* Cannot read from this controller via SPI */
dbi->read_commands = NULL;
@@ -354,15 +360,6 @@ static int st7586_probe(struct spi_device *spi)
if (ret)
return ret;
- /*
- * we are using 8-bit data, so we are not actually swapping anything,
- * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the
- * right thing and not use 16-bit transfers (which results in swapped
- * bytes on little-endian systems and causes out of order data to be
- * sent to the display).
- */
- dbi->swap_bytes = true;
-
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 76e1092f51d9..e07cdb1ecc90 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -202,6 +202,8 @@ static void dev_fini_ggtt(void *arg)
{
struct xe_ggtt *ggtt = arg;
+ scoped_guard(mutex, &ggtt->lock)
+ ggtt->flags &= ~XE_GGTT_FLAGS_ONLINE;
drain_workqueue(ggtt->wq);
}
@@ -261,6 +263,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (err)
return err;
+ ggtt->flags |= XE_GGTT_FLAGS_ONLINE;
err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
if (err)
return err;
@@ -293,13 +296,10 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
static void ggtt_node_remove(struct xe_ggtt_node *node)
{
struct xe_ggtt *ggtt = node->ggtt;
- struct xe_device *xe = tile_to_xe(ggtt->tile);
bool bound;
- int idx;
-
- bound = drm_dev_enter(&xe->drm, &idx);
mutex_lock(&ggtt->lock);
+ bound = ggtt->flags & XE_GGTT_FLAGS_ONLINE;
if (bound)
xe_ggtt_clear(ggtt, node->base.start, node->base.size);
drm_mm_remove_node(&node->base);
@@ -312,8 +312,6 @@ static void ggtt_node_remove(struct xe_ggtt_node *node)
if (node->invalidate_on_remove)
xe_ggtt_invalidate(ggtt);
- drm_dev_exit(idx);
-
free_node:
xe_ggtt_node_fini(node);
}
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index cb02b7994a9a..c5c86b53b2ed 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -25,11 +25,14 @@ struct xe_ggtt {
/** @size: Total size of this GGTT */
u64 size;
-#define XE_GGTT_FLAGS_64K BIT(0)
+#define XE_GGTT_FLAGS_64K BIT(0)
+#define XE_GGTT_FLAGS_ONLINE BIT(1)
/**
* @flags: Flags for this GGTT
* Acceptable flags:
* - %XE_GGTT_FLAGS_64K - if PTE size is 64K. Otherwise, regular is 4K.
+ * - %XE_GGTT_FLAGS_ONLINE - is GGTT online, protected by ggtt->lock
+ * after init
*/
unsigned int flags;
/** @scratch: Internal object allocation used as a scratch page */
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 3f142f95e5d4..fe997494a6f9 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -520,8 +520,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf,
size_t offset = 0;
int ret;
- /* Can't read from disabled streams */
- if (!stream->enabled || !stream->sample)
+ if (!stream->sample)
return -EINVAL;
if (!(file->f_flags & O_NONBLOCK)) {
@@ -1375,6 +1374,10 @@ static void xe_oa_stream_disable(struct xe_oa_stream *stream)
if (stream->sample)
hrtimer_cancel(&stream->poll_check_timer);
+
+ /* Update stream->oa_buffer.tail to allow any final reports to be read */
+ if (xe_oa_buffer_check_unlocked(stream))
+ wake_up(&stream->poll_wq);
}
static int xe_oa_enable_preempt_timeslice(struct xe_oa_stream *stream)
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index dd7bd766ae18..6affdd0a8095 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -142,8 +142,10 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
if (!signal) {
sync->fence = drm_syncobj_fence_get(sync->syncobj);
- if (XE_IOCTL_DBG(xe, !sync->fence))
- return -EINVAL;
+ if (XE_IOCTL_DBG(xe, !sync->fence)) {
+ err = -EINVAL;
+ goto free_sync;
+ }
}
break;
@@ -163,17 +165,21 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
if (signal) {
sync->chain_fence = dma_fence_chain_alloc();
- if (!sync->chain_fence)
- return -ENOMEM;
+ if (!sync->chain_fence) {
+ err = -ENOMEM;
+ goto free_sync;
+ }
} else {
sync->fence = drm_syncobj_fence_get(sync->syncobj);
- if (XE_IOCTL_DBG(xe, !sync->fence))
- return -EINVAL;
+ if (XE_IOCTL_DBG(xe, !sync->fence)) {
+ err = -EINVAL;
+ goto free_sync;
+ }
err = dma_fence_chain_find_seqno(&sync->fence,
sync_in.timeline_value);
if (err)
- return err;
+ goto free_sync;
}
break;
@@ -207,6 +213,10 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
sync->timeline_value = sync_in.timeline_value;
return 0;
+
+free_sync:
+ xe_sync_entry_cleanup(sync);
+ return err;
}
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index bd3cc5636648..284861c166d9 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -450,6 +450,8 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
(u64)(long)ctx,
true); /* prevent infinite recursions */
+ if (ret > size)
+ ret = size;
if (ret > 0)
memcpy(buf, dma_data, ret);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 0b0a9f4c2307..154250099adf 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -234,7 +234,7 @@ static int max6639_read_fan(struct device *dev, u32 attr, int channel,
static int max6639_set_ppr(struct max6639_data *data, int channel, u8 ppr)
{
/* Decrement the PPR value and shift left by 6 to match the register format */
- return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), ppr-- << 6);
+ return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), --ppr << 6);
}
static int max6639_write_fan(struct device *dev, u32 attr, int channel,
@@ -536,8 +536,8 @@ static int max6639_probe_child_from_dt(struct i2c_client *client,
{
struct device *dev = &client->dev;
- u32 i;
- int err, val;
+ u32 i, val;
+ int err;
err = of_property_read_u32(child, "reg", &i);
if (err) {
@@ -552,8 +552,8 @@ static int max6639_probe_child_from_dt(struct i2c_client *client,
err = of_property_read_u32(child, "pulses-per-revolution", &val);
if (!err) {
- if (val < 1 || val > 5) {
- dev_err(dev, "invalid pulses-per-revolution %d of %pOFn\n", val, child);
+ if (val < 1 || val > 4) {
+ dev_err(dev, "invalid pulses-per-revolution %u of %pOFn\n", val, child);
return -EINVAL;
}
data->ppr[i] = val;
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 7e53fb1d5ea3..f1cf3c9666df 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -80,8 +80,11 @@ static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
{
int val = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
- return sprintf(buf, "%d\n",
- (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS ? 1 : 0);
+ if (val < 0)
+ return val;
+
+ return sysfs_emit(buf, "%d\n",
+ (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS);
}
static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client,
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index 280bb12f762c..e26f09596a94 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -313,6 +313,8 @@ static int mp2973_read_word_data(struct i2c_client *client, int page,
case PMBUS_STATUS_WORD:
/* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */
ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
ret ^= PB_STATUS_POWER_GOOD_N;
break;
case PMBUS_OT_FAULT_LIMIT:
diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
index a235c1cdf4fe..d9fd1095ae7e 100644
--- a/drivers/hwmon/pmbus/q54sj108a2.c
+++ b/drivers/hwmon/pmbus/q54sj108a2.c
@@ -78,7 +78,8 @@ static ssize_t q54sj108a2_debugfs_read(struct file *file, char __user *buf,
int idx = *idxp;
struct q54sj108a2_data *psu = to_psu(idxp, idx);
char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
- char data_char[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
+ char data_char[I2C_SMBUS_BLOCK_MAX * 2 + 2] = { 0 };
+ char *out = data;
char *res;
switch (idx) {
@@ -149,27 +150,27 @@ static ssize_t q54sj108a2_debugfs_read(struct file *file, char __user *buf,
if (rc < 0)
return rc;
- res = bin2hex(data, data_char, 32);
- rc = res - data;
-
+ res = bin2hex(data_char, data, rc);
+ rc = res - data_char;
+ out = data_char;
break;
case Q54SJ108A2_DEBUGFS_FLASH_KEY:
rc = i2c_smbus_read_block_data(psu->client, PMBUS_FLASH_KEY_WRITE, data);
if (rc < 0)
return rc;
- res = bin2hex(data, data_char, 4);
- rc = res - data;
-
+ res = bin2hex(data_char, data, rc);
+ rc = res - data_char;
+ out = data_char;
break;
default:
return -EINVAL;
}
- data[rc] = '\n';
+ out[rc] = '\n';
rc += 2;
- return simple_read_from_buffer(buf, count, ppos, data, rc);
+ return simple_read_from_buffer(buf, count, ppos, out, rc);
}
static ssize_t q54sj108a2_debugfs_write(struct file *file, const char __user *buf,
diff --git a/drivers/i2c/busses/i2c-cp2615.c b/drivers/i2c/busses/i2c-cp2615.c
index e7720ea4045e..7b62ba115eb9 100644
--- a/drivers/i2c/busses/i2c-cp2615.c
+++ b/drivers/i2c/busses/i2c-cp2615.c
@@ -298,6 +298,9 @@ cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id)
if (!adap)
return -ENOMEM;
+ if (!usbdev->serial)
+ return -EINVAL;
+
strscpy(adap->name, usbdev->serial, sizeof(adap->name));
adap->owner = THIS_MODULE;
adap->dev.parent = &usbif->dev;
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index ae016a9431da..6a9245423d2b 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -728,6 +728,7 @@ static int fsi_i2c_probe(struct device *dev)
rc = i2c_add_adapter(&port->adapter);
if (rc < 0) {
dev_err(dev, "Failed to register adapter: %d\n", rc);
+ of_node_put(np);
kfree(port);
continue;
}
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index afc1a8171f59..6492c9a451f4 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -268,6 +268,7 @@ struct pxa_i2c {
struct pinctrl *pinctrl;
struct pinctrl_state *pinctrl_default;
struct pinctrl_state *pinctrl_recovery;
+ bool reset_before_xfer;
};
#define _IBMR(i2c) ((i2c)->reg_ibmr)
@@ -1144,6 +1145,11 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap,
{
struct pxa_i2c *i2c = adap->algo_data;
+ if (i2c->reset_before_xfer) {
+ i2c_pxa_reset(i2c);
+ i2c->reset_before_xfer = false;
+ }
+
return i2c_pxa_internal_xfer(i2c, msgs, num, i2c_pxa_do_xfer);
}
@@ -1521,7 +1527,16 @@ static int i2c_pxa_probe(struct platform_device *dev)
}
}
- i2c_pxa_reset(i2c);
+ /*
+ * Skip reset on Armada 3700 when recovery is used to avoid
+ * controller hang due to the pinctrl state changes done by
+ * the generic recovery initialization code. The reset will
+ * be performed later, prior to the first transfer.
+ */
+ if (i2c_type == REGS_A3700 && i2c->adap.bus_recovery_info)
+ i2c->reset_before_xfer = true;
+ else
+ i2c_pxa_reset(i2c);
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0)
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 4c019c746f23..e0853a6bde0a 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1013,7 +1013,7 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->free_pos &= ~BIT(pos);
}
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr) | DEV_ADDR_TABLE_SIR_REJECT,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
@@ -1042,7 +1042,7 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr) | DEV_ADDR_TABLE_SIR_REJECT,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd.h b/drivers/i3c/master/mipi-i3c-hci/cmd.h
index 1d6dd2c5d01a..b1bf87daa651 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd.h
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd.h
@@ -17,6 +17,7 @@
#define CMD_0_TOC W0_BIT_(31)
#define CMD_0_ROC W0_BIT_(30)
#define CMD_0_ATTR W0_MASK(2, 0)
+#define CMD_0_TID W0_MASK(6, 3)
/*
* Response Descriptor Structure
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index dd636094b07f..1b4f4d0db486 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -334,7 +334,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
hci->io->queue_xfer(hci, xfer, 1);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 1)) {
- ret = -ETIME;
+ ret = -ETIMEDOUT;
break;
}
if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
index 4493b2b067cb..3d33bfe937a6 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -277,7 +277,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
hci->io->queue_xfer(hci, xfer, 2);
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, 2)) {
- ret = -ETIME;
+ ret = -ETIMEDOUT;
break;
}
if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index a82c47c9986d..0fdd74704b4e 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -236,7 +236,7 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
- ret = -ETIME;
+ ret = -ETIMEDOUT;
goto out;
}
for (i = prefixed; i < nxfers; i++) {
@@ -348,7 +348,7 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
- ret = -ETIME;
+ ret = -ETIMEDOUT;
goto out;
}
for (i = 0; i < nxfers; i++) {
@@ -402,7 +402,7 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
goto out;
if (!wait_for_completion_timeout(&done, HZ) &&
hci->io->dequeue_xfer(hci, xfer, nxfers)) {
- ret = -ETIME;
+ ret = -ETIMEDOUT;
goto out;
}
for (i = 0; i < nxfers; i++) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 68c2923f5566..36a4c13ab757 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -482,7 +482,7 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
/* store no-op cmd descriptor */
- *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
+ *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) | FIELD_PREP(CMD_0_TID, xfer->cmd_tid);
*ring_data++ = 0;
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
*ring_data++ = 0;
@@ -500,7 +500,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
}
/* restart the ring */
+ mipi_i3c_hci_resume(hci);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);
return did_unqueue;
}
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 0b96534c6867..3b0fd9eacdfb 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -543,7 +543,7 @@ static int bme680_wait_for_eoc(struct bme680_data *data)
* + heater duration
*/
int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
- data->oversampling_humid) * 1936) + (477 * 4) +
+ data->oversampling_humid) * 1963) + (477 * 4) +
(477 * 5) + 1000 + (data->heater_dur * 1000);
usleep_range(wait_eoc_us, wait_eoc_us + 100);
diff --git a/drivers/iio/chemical/sps30_i2c.c b/drivers/iio/chemical/sps30_i2c.c
index 1b21b6bcd0e7..2d47fb0a47c2 100644
--- a/drivers/iio/chemical/sps30_i2c.c
+++ b/drivers/iio/chemical/sps30_i2c.c
@@ -171,7 +171,7 @@ static int sps30_i2c_read_meas(struct sps30_state *state, __be32 *meas, size_t n
if (!sps30_i2c_meas_ready(state))
return -ETIMEDOUT;
- return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(num) * num);
+ return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(*meas) * num);
}
static int sps30_i2c_clean_fan(struct sps30_state *state)
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
index a6dfbe28c914..7a63204eec79 100644
--- a/drivers/iio/chemical/sps30_serial.c
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -303,7 +303,7 @@ static int sps30_serial_read_meas(struct sps30_state *state, __be32 *meas, size_
if (msleep_interruptible(1000))
return -EINTR;
- ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(num));
+ ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(*meas));
if (ret < 0)
return ret;
/* if measurements aren't ready sensor returns empty frame */
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index e89e4c054653..de47407c5f44 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -141,7 +141,7 @@ static int ds4424_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val < S8_MIN || val > S8_MAX)
+ if (val <= S8_MIN || val > S8_MAX)
return -EINVAL;
if (val > 0) {
diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c
index 45ceeb828d6b..5dc780a62cc7 100644
--- a/drivers/iio/frequency/adf4377.c
+++ b/drivers/iio/frequency/adf4377.c
@@ -501,7 +501,7 @@ static int adf4377_soft_reset(struct adf4377_state *st)
return ret;
return regmap_read_poll_timeout(st->regmap, 0x0, read_val,
- !(read_val & (ADF4377_0000_SOFT_RESET_R_MSK |
+ !(read_val & (ADF4377_0000_SOFT_RESET_MSK |
ADF4377_0000_SOFT_RESET_R_MSK)), 200, 200 * 100);
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 4dcd0cc54551..3ef218c1519e 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -322,7 +322,9 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_RAW:
/* Resume device */
- pm_runtime_get_sync(mpu3050->dev);
+ ret = pm_runtime_resume_and_get(mpu3050->dev);
+ if (ret)
+ return ret;
mutex_lock(&mpu3050->lock);
ret = mpu3050_set_8khz_samplerate(mpu3050);
@@ -648,14 +650,20 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
{
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
- pm_runtime_get_sync(mpu3050->dev);
+ ret = pm_runtime_resume_and_get(mpu3050->dev);
+ if (ret)
+ return ret;
/* Unless we have OUR trigger active, run at full speed */
- if (!mpu3050->hw_irq_trigger)
- return mpu3050_set_8khz_samplerate(mpu3050);
+ if (!mpu3050->hw_irq_trigger) {
+ ret = mpu3050_set_8khz_samplerate(mpu3050);
+ if (ret)
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ }
- return 0;
+ return ret;
}
static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 29ecfa6fd633..81968e383f1e 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -19,8 +19,7 @@ static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
/* Just power up the device, that is all that is needed */
- pm_runtime_get_sync(mpu3050->dev);
- return 0;
+ return pm_runtime_resume_and_get(mpu3050->dev);
}
static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 8da15cde388a..13ced07cec24 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -454,6 +454,8 @@ static int inv_icm42600_accel_write_odr(struct iio_dev *indio_dev,
return -EINVAL;
conf.odr = inv_icm42600_accel_odr_conv[idx / 2];
+ if (conf.odr == st->conf.accel.odr)
+ return 0;
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 00b9db52ca78..890ddfc779d5 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -389,6 +389,8 @@ static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm42600_sensor_state *sensor_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &sensor_st->ts;
struct device *dev = regmap_get_device(st->map);
unsigned int sensor;
unsigned int *watermark;
@@ -410,6 +412,8 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
mutex_lock(&st->lock);
+ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+
ret = inv_icm42600_buffer_set_fifo_en(st, st->fifo.en & ~sensor);
if (ret)
goto out_unlock;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 6c7430dac6db..0e03e5cdc066 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -361,6 +361,8 @@ static int inv_icm42600_gyro_write_odr(struct iio_dev *indio_dev,
return -EINVAL;
conf.odr = inv_icm42600_gyro_odr_conv[idx / 2];
+ if (conf.odr == st->conf.gyro.odr)
+ return 0;
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 14d95f34e981..6afc78810820 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -1922,6 +1922,14 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
irq_type);
return -EINVAL;
}
+
+ /*
+ * Acking interrupts by status register does not work reliably
+ * but seem to work when this bit is set.
+ */
+ if (st->chip_type == INV_MPU9150)
+ st->irq_mask |= INV_MPU6050_INT_RD_CLEAR;
+
device_set_wakeup_capable(dev, true);
st->vdd_supply = devm_regulator_get(dev, "vdd");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index e1c0c5146876..e3618ca3fadd 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -387,6 +387,8 @@ struct inv_mpu6050_state {
/* enable level triggering */
#define INV_MPU6050_LATCH_INT_EN 0x20
#define INV_MPU6050_BIT_BYPASS_EN 0x2
+/* allow acking interrupts by any register read */
+#define INV_MPU6050_INT_RD_CLEAR 0x10
/* Allowed timestamp period jitter in percent */
#define INV_MPU6050_TS_PERIOD_JITTER 4
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 5b1088cc3704..c60e4109ed1d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -249,7 +249,6 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p)
switch (st->chip_type) {
case INV_MPU6000:
case INV_MPU6050:
- case INV_MPU9150:
/*
* WoM is not supported and interrupt status read seems to be broken for
* some chips. Since data ready is the only interrupt, bypass interrupt
@@ -258,6 +257,10 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p)
wom_bits = 0;
int_status = INV_MPU6050_BIT_RAW_DATA_RDY_INT;
goto data_ready_interrupt;
+ case INV_MPU9150:
+ /* IRQ needs to be acked */
+ wom_bits = 0;
+ break;
case INV_MPU6500:
case INV_MPU6515:
case INV_MPU6880:
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index c4efbfc18e97..989b70ec923a 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -228,8 +228,10 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
written = 0;
add_wait_queue(&rb->pollq, &wait);
do {
- if (!indio_dev->info)
- return -ENODEV;
+ if (!indio_dev->info) {
+ ret = -ENODEV;
+ break;
+ }
if (!iio_buffer_space_available(rb)) {
if (signal_pending(current)) {
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index 7890c0993ec4..0204b7629d20 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -222,7 +222,7 @@ static int mcp4131_write_raw(struct iio_dev *indio_dev,
mutex_lock(&data->lock);
- data->buf[0] = address << MCP4131_WIPER_SHIFT;
+ data->buf[0] = address;
data->buf[0] |= MCP4131_WRITE | (val >> 8);
data->buf[1] = val & 0xFF; /* 8 bits here */
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index d8fb34060d3d..e780373af2d6 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -708,6 +708,9 @@ static int hx9023s_set_samp_freq(struct hx9023s_data *data, int val, int val2)
struct device *dev = regmap_get_device(data->regmap);
unsigned int i, period_ms;
+ if (!val && !val2)
+ return -EINVAL;
+
period_ms = div_u64(NANO, (val * MEGA + val2));
for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) {
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 7f553f7aa3cb..b6ebf92d7e21 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1313,7 +1313,6 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
if (fault & DMA_FSTS_ITE) {
head = readl(iommu->reg + DMAR_IQH_REG);
head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
- head |= 1;
tail = readl(iommu->reg + DMAR_IQT_REG);
tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
@@ -1330,7 +1329,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
do {
if (qi->desc_status[head] == QI_IN_USE)
qi->desc_status[head] = QI_ABORT;
- head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+ head = (head - 1 + QI_LENGTH) % QI_LENGTH;
} while (head != tail);
/*
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 22d7dcc23629..b0eb6646a998 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3393,6 +3393,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int lpi_base;
int nr_lpis;
int nr_ites;
+ int id_bits;
int sz;
if (!its_alloc_device_table(its, dev_id))
@@ -3405,7 +3406,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
/*
* Even if the device wants a single LPI, the ITT must be
* sized as a power of two (and you need at least one bit...).
+ * Also honor the ITS's own EID limit.
*/
+ id_bits = FIELD_GET(GITS_TYPER_IDBITS, its->typer) + 1;
+ nvecs = min_t(unsigned int, nvecs, BIT(id_bits));
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 6f98065d8e5c..eb123d1e26b9 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -424,10 +424,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
if (!verity_fec_is_enabled(v))
return -EOPNOTSUPP;
- if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
- DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+ if (fio->level)
return -EIO;
- }
fio->level++;
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 09123a612953..ec37e607cb3f 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -23,9 +23,6 @@
#define DM_VERITY_FEC_BUF_MAX \
(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
-/* maximum recursion level for verity_fec_decode */
-#define DM_VERITY_FEC_MAX_RECURSION 4
-
#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
#define DM_VERITY_OPT_FEC_START "fec_start"
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 8bb8dd34c223..a2159b2bc176 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -228,6 +228,9 @@ static int handle_one_ule_extension( struct dvb_net_priv *p )
unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
unsigned char htype = p->ule_sndu_type & 0x00FF;
+ if (htype >= ARRAY_SIZE(ule_mandatory_ext_handlers))
+ return -1;
+
/* Discriminate mandatory and optional extension headers. */
if (hlen == 0) {
/* Mandatory extension header */
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 26d86d564249..ec72453203de 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -18,6 +18,8 @@
#define RK3288_CLKGEN_DIV 2
#define SDMMC_TIMING_CON0 0x130
#define SDMMC_TIMING_CON1 0x134
+#define SDMMC_MISC_CON 0x138
+#define MEM_CLK_AUTOGATE_ENABLE BIT(5)
#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DEGREE_OFFSET 1
@@ -35,6 +37,8 @@ struct dw_mci_rockchip_priv_data {
int default_sample_phase;
int num_phases;
bool internal_phase;
+ int sample_phase;
+ int drv_phase;
};
/*
@@ -469,6 +473,7 @@ static int dw_mci_rk3576_parse_dt(struct dw_mci *host)
static int dw_mci_rockchip_init(struct dw_mci *host)
{
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
int ret, i;
/* It is slot 8 on Rockchip SoCs */
@@ -493,6 +498,9 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
dev_warn(host->dev, "no valid minimum freq: %d\n", ret);
}
+ if (priv->internal_phase)
+ mci_writel(host, MISC_CON, MEM_CLK_AUTOGATE_ENABLE);
+
return 0;
}
@@ -567,12 +575,43 @@ static void dw_mci_rockchip_remove(struct platform_device *pdev)
dw_mci_pltfm_remove(pdev);
}
+static int dw_mci_rockchip_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_mci *host = platform_get_drvdata(pdev);
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+
+ if (priv->internal_phase) {
+ priv->sample_phase = rockchip_mmc_get_phase(host, true);
+ priv->drv_phase = rockchip_mmc_get_phase(host, false);
+ }
+
+ return dw_mci_runtime_suspend(dev);
+}
+
+static int dw_mci_rockchip_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_mci *host = platform_get_drvdata(pdev);
+ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ int ret;
+
+ ret = dw_mci_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ if (priv->internal_phase) {
+ rockchip_mmc_set_phase(host, true, priv->sample_phase);
+ rockchip_mmc_set_phase(host, false, priv->drv_phase);
+ mci_writel(host, MISC_CON, MEM_CLK_AUTOGATE_ENABLE);
+ }
+
+ return ret;
+}
+
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_rockchip_runtime_suspend, dw_mci_rockchip_runtime_resume, NULL)
};
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
@@ -582,7 +621,7 @@ static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.name = "dwmmc_rockchip",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_rockchip_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_rockchip_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/mmci_qcom_dml.c b/drivers/mmc/host/mmci_qcom_dml.c
index 3da6112fbe39..67371389cc33 100644
--- a/drivers/mmc/host/mmci_qcom_dml.c
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -109,6 +109,7 @@ static int of_get_dml_pipe_index(struct device_node *np, const char *name)
&dma_spec))
return -ENODEV;
+ of_node_put(dma_spec.np);
if (dma_spec.args_count)
return dma_spec.args[0];
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 8477b9dd80b7..b21bb1d55b4f 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -67,6 +67,9 @@
#define GLI_9750_MISC_TX1_DLY_VALUE 0x5
#define SDHCI_GLI_9750_MISC_SSC_OFF BIT(26)
+#define SDHCI_GLI_9750_GM_BURST_SIZE 0x510
+#define SDHCI_GLI_9750_GM_BURST_SIZE_R_OSRC_LMT GENMASK(17, 16)
+
#define SDHCI_GLI_9750_TUNING_CONTROL 0x540
#define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4)
#define GLI_9750_TUNING_CONTROL_EN_ON 0x1
@@ -271,10 +274,16 @@ static void gli_set_9750(struct sdhci_host *host)
u32 misc_value;
u32 parameter_value;
u32 control_value;
+ u32 burst_value;
u16 ctrl2;
gl9750_wt_on(host);
+ /* clear R_OSRC_Lmt to avoid DMA write corruption */
+ burst_value = sdhci_readl(host, SDHCI_GLI_9750_GM_BURST_SIZE);
+ burst_value &= ~SDHCI_GLI_9750_GM_BURST_SIZE_R_OSRC_LMT;
+ sdhci_writel(host, burst_value, SDHCI_GLI_9750_GM_BURST_SIZE);
+
driving_value = sdhci_readl(host, SDHCI_GLI_9750_DRIVING);
pll_value = sdhci_readl(host, SDHCI_GLI_9750_PLL);
sw_ctrl_value = sdhci_readl(host, SDHCI_GLI_9750_SW_CTRL);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 4b91c9e96635..bd67cbb9a19e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -4479,8 +4479,15 @@ int sdhci_setup_host(struct sdhci_host *host)
* their platform code before calling sdhci_add_host(), and we
* won't assume 8-bit width for hosts without that CAP.
*/
- if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
+ if (host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA) {
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+ if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400)
+ host->caps1 &= ~SDHCI_SUPPORT_HS400;
+ mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+ mmc->caps &= ~(MMC_CAP_DDR | MMC_CAP_UHS);
+ } else {
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ }
if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
mmc->caps &= ~MMC_CAP_CMD23;
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 2eb44c1428fb..bbba3cf477e9 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2303,14 +2303,12 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < ctrl->max_oob; i += 4)
oob_reg_write(ctrl, i, 0xffffffff);
- if (mtd->oops_panic_write)
+ if (mtd->oops_panic_write) {
/* switch to interrupt polling and PIO mode */
disable_ctrl_irqs(ctrl);
-
- if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
+ } else if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) {
if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize,
CMD_PROGRAM_PAGE))
-
ret = -EIO;
goto out;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 5872f1dfe701..636af179579f 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2878,7 +2878,7 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
sizeof(*cdns_ctrl->cdma_desc),
&cdns_ctrl->dma_cdma_desc,
GFP_KERNEL);
- if (!cdns_ctrl->dma_cdma_desc)
+ if (!cdns_ctrl->cdma_desc)
return -ENOMEM;
cdns_ctrl->buf_size = SZ_16K;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 3e1844bfb808..d654e5f52ee0 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -4868,11 +4868,16 @@ static void nand_shutdown(struct mtd_info *mtd)
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
if (!chip->ops.lock_area)
return -ENOTSUPP;
- return chip->ops.lock_area(chip, ofs, len);
+ nand_get_device(chip);
+ ret = chip->ops.lock_area(chip, ofs, len);
+ nand_release_device(chip);
+
+ return ret;
}
/**
@@ -4884,11 +4889,16 @@ static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
if (!chip->ops.unlock_area)
return -ENOTSUPP;
- return chip->ops.unlock_area(chip, ofs, len);
+ nand_get_device(chip);
+ ret = chip->ops.unlock_area(chip, ofs, len);
+ nand_release_device(chip);
+
+ return ret;
}
/* Set default functions */
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 13c187af5a86..ffbf769e4fc0 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -862,6 +862,9 @@ static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);
+ writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
+ pl35x_smc_update_regs(nfc);
+
return 0;
}
diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c
index 3b55b676ca6b..c06ba7a2a34b 100644
--- a/drivers/mtd/parsers/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -270,9 +270,9 @@ static int parse_redboot_partitions(struct mtd_info *master,
strcpy(names, fl->img->name);
#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
- if (!memcmp(names, "RedBoot", 8) ||
- !memcmp(names, "RedBoot config", 15) ||
- !memcmp(names, "FIS directory", 14)) {
+ if (!strcmp(names, "RedBoot") ||
+ !strcmp(names, "RedBoot config") ||
+ !strcmp(names, "FIS directory")) {
parts[i].mask_flags = MTD_WRITEABLE;
}
#endif
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 9d6e85bf227b..a3e6a8c28dfb 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2037,6 +2037,76 @@ static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
return info;
}
+/*
+ * On Octal DTR capable flashes, reads cannot start or end at an odd
+ * address in Octal DTR mode. Extra bytes need to be read at the start
+ * or end to make sure both the start address and length remain even.
+ */
+static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ u_char *tmp_buf;
+ size_t tmp_len;
+ loff_t start, end;
+ int ret, bytes_read;
+
+ if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_read_data(nor, from, len, buf);
+ else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
+ return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
+ buf);
+
+ tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ start = round_down(from, 2);
+ end = round_up(from + len, 2);
+
+ /*
+ * Avoid allocating too much memory. The requested read length might be
+ * quite large. Allocating a buffer just as large (slightly bigger, in
+ * fact) would put unnecessary memory pressure on the system.
+ *
+ * For example if the read is from 3 to 1M, then this will read from 2
+ * to 4098. The reads from 4098 to 1M will then not need a temporary
+ * buffer so they can proceed as normal.
+ */
+ tmp_len = min_t(size_t, end - start, PAGE_SIZE);
+
+ ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are read than actually requested, but that number can't be
+ * reported to the calling function or it will confuse its calculations.
+ * Calculate how many of the _requested_ bytes were read.
+ */
+ bytes_read = ret;
+
+ if (from != start)
+ ret -= from - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually read.
+ * For example, if the total length was truncated because of temporary
+ * buffer size limit then the adjustment for the extra bytes at the end
+ * is not needed.
+ */
+ if (start + bytes_read == end)
+ ret -= end - (from + len);
+
+ memcpy(buf, tmp_buf + (from - start), ret);
+out:
+ kfree(tmp_buf);
+ return ret;
+}
+
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -2054,7 +2124,11 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
while (len) {
loff_t addr = from;
- ret = spi_nor_read_data(nor, addr, len, buf);
+ if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
+ else
+ ret = spi_nor_read_data(nor, addr, len, buf);
+
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2077,6 +2151,68 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
return ret;
}
+/*
+ * On Octal DTR capable flashes, writes cannot start or end at an odd address
+ * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to
+ * make sure the start address and end address are even. 0xff is used because
+ * on NOR flashes a program operation can only flip bits from 1 to 0, not the
+ * other way round. 0 to 1 flip needs to happen via erases.
+ */
+static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ u8 *tmp_buf;
+ size_t bytes_written;
+ loff_t start, end;
+ int ret;
+
+ if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_write_data(nor, to, len, buf);
+
+ tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ memset(tmp_buf, 0xff, nor->params->page_size);
+
+ start = round_down(to, 2);
+ end = round_up(to + len, 2);
+
+ memcpy(tmp_buf + (to - start), buf, len);
+
+ ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are written than actually requested, but that number can't
+ * be reported to the calling function or it will confuse its
+ * calculations. Calculate how many of the _requested_ bytes were
+ * written.
+ */
+ bytes_written = ret;
+
+ if (to != start)
+ ret -= to - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually
+ * written. For example, if for some reason the controller could only
+ * complete a partial write then the adjustment for the extra bytes at
+ * the end is not needed.
+ */
+ if (start + bytes_written == end)
+ ret -= end - (to + len);
+
+out:
+ kfree(tmp_buf);
+ return ret;
+}
+
/*
* Write an address range to the nor chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
@@ -2113,7 +2249,12 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
goto write_err;
}
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
+ buf + i);
+ else
+ ret = spi_nor_write_data(nor, addr, page_remain,
+ buf + i);
spi_nor_unlock_device(nor);
if (ret < 0)
goto write_err;
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index b19492a7f6ad..3c1945c3e850 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -34,11 +34,17 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
for (; hash_index != RLB_NULL_INDEX;
hash_index = client_info->used_next) {
client_info = &(bond_info->rx_hashtbl[hash_index]);
- seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
- &client_info->ip_src,
- &client_info->ip_dst,
- &client_info->mac_dst,
- client_info->slave->dev->name);
+ if (client_info->slave)
+ seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
+ &client_info->ip_src,
+ &client_info->ip_dst,
+ &client_info->mac_dst,
+ client_info->slave->dev->name);
+ else
+ seq_printf(m, "%-15pI4 %-15pI4 %-17pM (none)\n",
+ &client_info->ip_src,
+ &client_info->ip_dst,
+ &client_info->mac_dst);
}
spin_unlock_bh(&bond->mode_lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ac455a9d1bb..5035cfa74f1a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1542,88 +1542,52 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
return features;
}
-#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
- NETIF_F_GSO_ENCAP_ALL | \
- NETIF_F_HIGHDMA | NETIF_F_LRO)
-
-#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
-
-#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_GSO_SOFTWARE)
-
-
-static void bond_compute_features(struct bonding *bond)
+static int bond_header_create(struct sk_buff *skb, struct net_device *bond_dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
{
- unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
- IFF_XMIT_DST_RELEASE_PERM;
- netdev_features_t vlan_features = BOND_VLAN_FEATURES;
- netdev_features_t enc_features = BOND_ENC_FEATURES;
-#ifdef CONFIG_XFRM_OFFLOAD
- netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
-#endif /* CONFIG_XFRM_OFFLOAD */
- netdev_features_t mpls_features = BOND_MPLS_FEATURES;
- struct net_device *bond_dev = bond->dev;
- struct list_head *iter;
+ struct bonding *bond = netdev_priv(bond_dev);
+ const struct header_ops *slave_ops;
struct slave *slave;
- unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int tso_max_size = TSO_MAX_SIZE;
- u16 tso_max_segs = TSO_MAX_SEGS;
-
- if (!bond_has_slaves(bond))
- goto done;
-
- vlan_features = netdev_base_features(vlan_features);
- mpls_features = netdev_base_features(mpls_features);
-
- bond_for_each_slave(bond, slave, iter) {
- vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features, BOND_VLAN_FEATURES);
-
- enc_features = netdev_increment_features(enc_features,
- slave->dev->hw_enc_features,
- BOND_ENC_FEATURES);
-
-#ifdef CONFIG_XFRM_OFFLOAD
- xfrm_features = netdev_increment_features(xfrm_features,
- slave->dev->hw_enc_features,
- BOND_XFRM_FEATURES);
-#endif /* CONFIG_XFRM_OFFLOAD */
-
- mpls_features = netdev_increment_features(mpls_features,
- slave->dev->mpls_features,
- BOND_MPLS_FEATURES);
-
- dst_release_flag &= slave->dev->priv_flags;
- if (slave->dev->hard_header_len > max_hard_header_len)
- max_hard_header_len = slave->dev->hard_header_len;
+ int ret = 0;
- tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
- tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave) {
+ slave_ops = READ_ONCE(slave->dev->header_ops);
+ if (slave_ops && slave_ops->create)
+ ret = slave_ops->create(skb, slave->dev,
+ type, daddr, saddr, len);
}
- bond_dev->hard_header_len = max_hard_header_len;
-
-done:
- bond_dev->vlan_features = vlan_features;
- bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
-#ifdef CONFIG_XFRM_OFFLOAD
- bond_dev->hw_enc_features |= xfrm_features;
-#endif /* CONFIG_XFRM_OFFLOAD */
- bond_dev->mpls_features = mpls_features;
- netif_set_tso_max_segs(bond_dev, tso_max_segs);
- netif_set_tso_max_size(bond_dev, tso_max_size);
+ rcu_read_unlock();
+ return ret;
+}
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
- if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
- dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
- bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+static int bond_header_parse(const struct sk_buff *skb,
+ const struct net_device *dev,
+ unsigned char *haddr)
+{
+ struct bonding *bond = netdev_priv(dev);
+ const struct header_ops *slave_ops;
+ struct slave *slave;
+ int ret = 0;
- netdev_change_features(bond_dev);
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave) {
+ slave_ops = READ_ONCE(slave->dev->header_ops);
+ if (slave_ops && slave_ops->parse)
+ ret = slave_ops->parse(skb, slave->dev, haddr);
+ }
+ rcu_read_unlock();
+ return ret;
}
+static const struct header_ops bond_header_ops = {
+ .create = bond_header_create,
+ .parse = bond_header_parse,
+};
+
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
@@ -1631,7 +1595,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
dev_close(bond_dev);
- bond_dev->header_ops = slave_dev->header_ops;
+ bond_dev->header_ops = slave_dev->header_ops ?
+ &bond_header_ops : NULL;
bond_dev->type = slave_dev->type;
bond_dev->hard_header_len = slave_dev->hard_header_len;
@@ -2370,7 +2335,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
bond->slave_cnt++;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond_set_carrier(bond);
/* Needs to be called before bond_select_active_slave(), which will
@@ -2622,7 +2587,7 @@ static int __bond_release_one(struct net_device *bond_dev,
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
@@ -2936,8 +2901,14 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
+ case BOND_LINK_FAIL:
+ case BOND_LINK_BACK:
+ slave_dbg(bond->dev, slave->dev, "link_new_state %d on slave\n",
+ slave->link_new_state);
+ continue;
+
default:
- slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
+ slave_err(bond->dev, slave->dev, "invalid link_new_state %d on slave\n",
slave->link_new_state);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
@@ -3518,7 +3489,7 @@ int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
} else if (is_arp) {
return bond_arp_rcv(skb, bond, slave);
#if IS_ENABLED(CONFIG_IPV6)
- } else if (is_ipv6) {
+ } else if (is_ipv6 && likely(ipv6_mod_enabled())) {
return bond_na_rcv(skb, bond, slave);
#endif
} else {
@@ -4120,7 +4091,7 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
if (!bond->notifier_ctx) {
bond->notifier_ctx = true;
- bond_compute_features(bond);
+ netdev_compute_master_upper_features(bond->dev, true);
bond->notifier_ctx = false;
}
break;
@@ -6058,7 +6029,7 @@ void bond_setup(struct net_device *bond_dev)
* capable
*/
- bond_dev->hw_features = BOND_VLAN_FEATURES |
+ bond_dev->hw_features = MASTER_UPPER_DEV_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_STAG_RX |
@@ -6067,6 +6038,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+ bond_dev->features |= NETIF_F_GSO_PARTIAL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
/* Only enable XFRM features if this is an active-backup config */
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 699ed0ff461e..6799dbf80f48 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -311,6 +311,7 @@ static void ser_release(struct work_struct *work)
dev_close(ser->dev);
unregister_netdevice(ser->dev);
debugfs_deinit(ser);
+ tty_kref_put(tty->link);
tty_kref_put(tty);
}
rtnl_unlock();
@@ -345,6 +346,7 @@ static int ldisc_open(struct tty_struct *tty)
ser = netdev_priv(dev);
ser->tty = tty_kref_get(tty);
+ tty_kref_get(tty->link);
ser->dev = dev;
debugfs_init(ser, tty);
tty->receive_room = N_TTY_BUF_SIZE;
@@ -353,6 +355,7 @@ static int ldisc_open(struct tty_struct *tty)
rtnl_lock();
result = register_netdevice(dev);
if (result) {
+ tty_kref_put(tty->link);
tty_kref_put(tty);
rtnl_unlock();
free_netdev(dev);
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index c9eba1d37b0e..10470e743615 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -756,7 +756,9 @@ static int hi3110_open(struct net_device *net)
return ret;
mutex_lock(&priv->hi3110_lock);
- hi3110_power_enable(priv->transceiver, 1);
+ ret = hi3110_power_enable(priv->transceiver, 1);
+ if (ret)
+ goto out_close_candev;
priv->force_quit = 0;
priv->tx_skb = NULL;
@@ -791,6 +793,7 @@ static int hi3110_open(struct net_device *net)
hi3110_hw_sleep(spi);
out_close:
hi3110_power_enable(priv->transceiver, 0);
+ out_close_candev:
close_candev(net);
mutex_unlock(&priv->hi3110_lock);
return ret;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index d1d1412c6565..ee3d49a686de 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -769,9 +769,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
}
-static int gs_usb_set_bittiming(struct net_device *netdev)
+static int gs_usb_set_bittiming(struct gs_can *dev)
{
- struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
struct gs_device_bittiming dbt = {
.prop_seg = cpu_to_le32(bt->prop_seg),
@@ -788,9 +787,8 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
GFP_KERNEL);
}
-static int gs_usb_set_data_bittiming(struct net_device *netdev)
+static int gs_usb_set_data_bittiming(struct gs_can *dev)
{
- struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.data_bittiming;
struct gs_device_bittiming dbt = {
.prop_seg = cpu_to_le32(bt->prop_seg),
@@ -1054,6 +1052,20 @@ static int gs_can_open(struct net_device *netdev)
if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
flags |= GS_CAN_MODE_HW_TIMESTAMP;
+ rc = gs_usb_set_bittiming(dev);
+ if (rc) {
+ netdev_err(netdev, "failed to set bittiming: %pe\n", ERR_PTR(rc));
+ goto out_usb_kill_anchored_urbs;
+ }
+
+ if (ctrlmode & CAN_CTRLMODE_FD) {
+ rc = gs_usb_set_data_bittiming(dev);
+ if (rc) {
+ netdev_err(netdev, "failed to set data bittiming: %pe\n", ERR_PTR(rc));
+ goto out_usb_kill_anchored_urbs;
+ }
+ }
+
/* finally start device */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
dm.flags = cpu_to_le32(flags);
@@ -1354,7 +1366,6 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.state = CAN_STATE_STOPPED;
dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
- dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
@@ -1378,7 +1389,6 @@ static struct gs_can *gs_make_candev(unsigned int channel,
* GS_CAN_FEATURE_BT_CONST_EXT is set.
*/
dev->can.data_bittiming_const = &dev->bt_const;
- dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
if (feature & GS_CAN_FEATURE_TERMINATION) {
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index f1372830d5fa..e680fff7d23f 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -980,15 +980,19 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("%s: failed to software reset switch\n", __func__);
+ if (!priv->wol_ports_mask)
+ clk_disable_unprepare(priv->clk);
return ret;
}
bcm_sf2_crossbar_setup(priv);
ret = bcm_sf2_cfp_resume(ds);
- if (ret)
+ if (ret) {
+ if (!priv->wol_ports_mask)
+ clk_disable_unprepare(priv->clk);
return ret;
-
+ }
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index b6fcdabbf5dd..0596bb7612e8 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -1101,6 +1101,7 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
struct ksz_irq *ptpirq = &port->ptpirq;
struct ksz_ptp_irq *ptpmsg_irq;
+ int ret;
ptpmsg_irq = &port->ptpmsg_irq[n];
ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
@@ -1112,9 +1113,13 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
- return request_threaded_irq(ptpmsg_irq->num, NULL,
- ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
- ptpmsg_irq->name, ptpmsg_irq);
+ ret = request_threaded_irq(ptpmsg_irq->num, NULL,
+ ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+ ptpmsg_irq->name, ptpmsg_irq);
+ if (ret)
+ irq_dispose_mapping(ptpmsg_irq->num);
+
+ return ret;
}
int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 74a8336174e5..4cb986988f1a 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1480,8 +1480,7 @@ static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
cnt[RTL8365MB_MIB_ifInMulticastPkts] +
- cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
- cnt[RTL8365MB_MIB_ifOutDiscards];
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
index 99c890681ae6..509ffd3f8db5 100644
--- a/drivers/net/dsa/realtek/rtl8366rb-leds.c
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -12,11 +12,11 @@ static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
case 0:
return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
case 1:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port));
case 2:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port));
case 3:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port));
default:
return 0;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index c6fcddbff3f5..418f4513a0b9 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1338,6 +1338,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
+ /* Synchronize flag with hardware state after enabling TX/RX.
+ * This prevents stale state after device restart cycles.
+ */
+ pdata->data_path_stopped = false;
udp_tunnel_nic_reset_ntf(netdev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6d2c401bb246..0a99a21af581 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -2050,7 +2050,7 @@ static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int reg;
+ int reg;
/* step 2: force PCS to send RX_ADAPT Req to PHY */
XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
@@ -2072,11 +2072,20 @@ static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
/* Step 4: Check for Block lock */
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ goto set_mode;
+
+ /* Link status is latched low so that momentary link drops
+ * can be detected. If link was already down read again
+ * to get the latest state.
+ */
+ if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg < 0)
+ goto set_mode;
+ }
+
if (reg & MDIO_STAT1_LSTATUS) {
/* If the block lock is found, update the helpers
* and declare the link up
@@ -2116,6 +2125,48 @@ static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
xgbe_rx_adaptation(pdata);
}
+/**
+ * xgbe_phy_stop_data_path - Stop TX/RX to prevent packet corruption
+ * @pdata: driver private data
+ *
+ * This function stops the data path (TX and RX) to prevent packet
+ * corruption during critical PHY operations like RX adaptation.
+ * Must be called before initiating RX adaptation when link goes down.
+ */
+static void xgbe_phy_stop_data_path(struct xgbe_prv_data *pdata)
+{
+ if (pdata->data_path_stopped)
+ return;
+
+ /* Stop TX/RX to prevent packet corruption during RX adaptation */
+ pdata->hw_if.disable_tx(pdata);
+ pdata->hw_if.disable_rx(pdata);
+ pdata->data_path_stopped = true;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "stopping data path for RX adaptation\n");
+}
+
+/**
+ * xgbe_phy_start_data_path - Re-enable TX/RX after RX adaptation
+ * @pdata: driver private data
+ *
+ * This function re-enables the data path (TX and RX) after RX adaptation
+ * has completed successfully. Only called when link is confirmed up.
+ */
+static void xgbe_phy_start_data_path(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->data_path_stopped)
+ return;
+
+ pdata->hw_if.enable_rx(pdata);
+ pdata->hw_if.enable_tx(pdata);
+ pdata->data_path_stopped = false;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "restarting data path after RX adaptation\n");
+}
+
static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
int reg;
@@ -2909,13 +2960,27 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
if (pdata->en_rx_adap) {
/* if the link is available and adaptation is done,
* declare link up
+ *
+ * Note: When link is up and adaptation is done, we can
+ * safely re-enable the data path if it was stopped
+ * for adaptation.
*/
- if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) {
+ xgbe_phy_start_data_path(pdata);
return 1;
+ }
/* If either link is not available or adaptation is not done,
* retrigger the adaptation logic. (if the mode is not set,
* then issue mailbox command first)
*/
+
+ /* CRITICAL: Stop data path BEFORE triggering RX adaptation
+ * to prevent CRC errors from packets corrupted during
+ * the adaptation process. This is especially important
+ * when AN is OFF in 10G KR mode.
+ */
+ xgbe_phy_stop_data_path(pdata);
+
if (pdata->mode_set) {
xgbe_phy_rx_adaptation(pdata);
} else {
@@ -2923,8 +2988,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
xgbe_phy_set_mode(pdata, phy_data->cur_mode);
}
- if (pdata->rx_adapt_done)
+ if (pdata->rx_adapt_done) {
+ /* Adaptation complete, safe to re-enable data path */
+ xgbe_phy_start_data_path(pdata);
return 1;
+ }
} else if (reg & MDIO_STAT1_LSTATUS)
return 1;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index c98461252053..ebe504cb9a11 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1321,6 +1321,10 @@ struct xgbe_prv_data {
bool en_rx_adap;
int rx_adapt_retries;
bool rx_adapt_done;
+ /* Flag to track if data path (TX/RX) was stopped for RX adaptation.
+ * This prevents packet corruption during the adaptation window.
+ */
+ bool data_path_stopped;
bool mode_set;
};
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 8283aeee35fb..dde4046cbf01 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -934,6 +934,17 @@ int arc_emac_probe(struct net_device *ndev, int interface)
/* Set poll rate so that it polls every 1 ms */
arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
+ /*
+ * Put the device into a known quiescent state before requesting
+ * the IRQ. Clear only EMAC interrupt status bits here; leave the
+ * MDIO completion bit alone and avoid writing TXPL_MASK, which is
+ * used to force TX polling rather than acknowledge interrupts.
+ */
+ arc_reg_set(priv, R_ENABLE, 0);
+ arc_reg_set(priv, R_STATUS, RXINT_MASK | TXINT_MASK | ERR_MASK |
+ TXCH_MASK | MSER_MASK | RXCR_MASK |
+ RXFR_MASK | RXFL_MASK);
+
ndev->irq = irq;
dev_info(dev, "IRQ is %d\n", ndev->irq);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 0a8f3dc3c2f0..0be9c64ae2fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -958,8 +958,8 @@ static int bnxt_set_channels(struct net_device *dev,
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
- netif_is_rxfh_configured(dev)) {
- netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+ (netif_is_rxfh_configured(dev) || bp->num_rss_ctx)) {
+ netdev_warn(dev, "RSS table size change required, RSS table entries must be default (with no additional RSS contexts present) to proceed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f7be886570d8..49f6e83d6013 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1272,8 +1272,7 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
}
}
-void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
- bool tx_lpi_enabled)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1293,7 +1292,7 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
/* Enable EEE and switch to a 27Mhz clock automatically */
reg = bcmgenet_readl(priv->base + off);
- if (tx_lpi_enabled)
+ if (enable)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1312,14 +1311,12 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
priv->clk_eee_enabled = false;
}
- priv->eee.eee_enabled = enable;
- priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_keee *p = &priv->eee;
+ int ret;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1327,17 +1324,21 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
+ ret = phy_ethtool_get_eee(dev->phydev, e);
+ if (ret)
+ return ret;
+
+ /* tx_lpi_timer is maintained by the MAC hardware register; the
+ * PHY-level eee_cfg timer is not set for GENET.
+ */
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
- return phy_ethtool_get_eee(dev->phydev, e);
+ return 0;
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_keee *p = &priv->eee;
- bool active;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1345,15 +1346,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
if (!dev->phydev)
return -ENODEV;
- p->eee_enabled = e->eee_enabled;
-
- if (!p->eee_enabled) {
- bcmgenet_eee_enable_set(dev, false, false);
- } else {
- active = phy_init_eee(dev->phydev, false) >= 0;
- bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
- }
+ bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
return phy_ethtool_set_eee(dev->phydev, e);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 43b923c48b14..c0005a0fff56 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -646,8 +646,6 @@ struct bcmgenet_priv {
bool wol_active;
struct bcmgenet_mib_counters mib;
-
- struct ethtool_keee eee;
};
#define GENET_IO_MACRO(name, offset) \
@@ -705,7 +703,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
- bool tx_lpi_enabled);
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable);
#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 3b082114f2e5..2033fb9d893e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -123,7 +123,7 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS)
& RBUF_STATUS_WOL)) {
retries++;
- if (retries > 5) {
+ if (retries > 50) {
netdev_crit(dev, "polling wol mode timeout\n");
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c4a3698cef66..9beb65e6d0a9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -30,7 +30,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
- bool active;
/* speed */
if (phydev->speed == SPEED_1000)
@@ -91,10 +90,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmgenet_eee_enable_set(dev,
- priv->eee.eee_enabled && active,
- priv->eee.tx_lpi_enabled);
}
/* setup netdev link state when PHY link status change and
@@ -114,6 +109,8 @@ void bcmgenet_mii_setup(struct net_device *dev)
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
+ bcmgenet_eee_enable_set(dev, phydev->enable_tx_lpi);
+
phy_print_status(phydev);
}
@@ -408,6 +405,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
/* Indicate that the MAC is responsible for PHY PM */
dev->phydev->mac_managed_pm = true;
+ if (!GENET_IS_V1(priv))
+ phy_support_eee(dev->phydev);
+
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f258c4b82c74..89aa50893d36 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -38,6 +38,7 @@
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/gcd.h>
#include <linux/inetdevice.h>
#include "macb.h"
@@ -719,6 +720,97 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
netif_tx_stop_all_queues(ndev);
}
+/* Use juggling algorithm to left rotate tx ring and tx skb array */
+static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
+{
+ unsigned int head, tail, count, ring_size, desc_size;
+ struct macb_tx_skb tx_skb, *skb_curr, *skb_next;
+ struct macb_dma_desc *desc_curr, *desc_next;
+ unsigned int i, cycles, shift, curr, next;
+ struct macb *bp = queue->bp;
+ unsigned char desc[24];
+ unsigned long flags;
+
+ desc_size = macb_dma_desc_get_size(bp);
+
+ if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc)))
+ return;
+
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+ head = queue->tx_head;
+ tail = queue->tx_tail;
+ ring_size = bp->tx_ring_size;
+ count = CIRC_CNT(head, tail, ring_size);
+
+ if (!(tail % ring_size))
+ goto unlock;
+
+ if (!count) {
+ queue->tx_head = 0;
+ queue->tx_tail = 0;
+ goto unlock;
+ }
+
+ shift = tail % ring_size;
+ cycles = gcd(ring_size, shift);
+
+ for (i = 0; i < cycles; i++) {
+ memcpy(&desc, macb_tx_desc(queue, i), desc_size);
+ memcpy(&tx_skb, macb_tx_skb(queue, i),
+ sizeof(struct macb_tx_skb));
+
+ curr = i;
+ next = (curr + shift) % ring_size;
+
+ while (next != i) {
+ desc_curr = macb_tx_desc(queue, curr);
+ desc_next = macb_tx_desc(queue, next);
+
+ memcpy(desc_curr, desc_next, desc_size);
+
+ if (next == ring_size - 1)
+ desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
+ if (curr == ring_size - 1)
+ desc_curr->ctrl |= MACB_BIT(TX_WRAP);
+
+ skb_curr = macb_tx_skb(queue, curr);
+ skb_next = macb_tx_skb(queue, next);
+ memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb));
+
+ curr = next;
+ next = (curr + shift) % ring_size;
+ }
+
+ desc_curr = macb_tx_desc(queue, curr);
+ memcpy(desc_curr, &desc, desc_size);
+ if (i == ring_size - 1)
+ desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
+ if (curr == ring_size - 1)
+ desc_curr->ctrl |= MACB_BIT(TX_WRAP);
+ memcpy(macb_tx_skb(queue, curr), &tx_skb,
+ sizeof(struct macb_tx_skb));
+ }
+
+ queue->tx_head = count;
+ queue->tx_tail = 0;
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+unlock:
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+}
+
+/* Rotate the queue so that the tail is at index 0 */
+static void gem_shuffle_tx_rings(struct macb *bp)
+{
+ struct macb_queue *queue;
+ int q;
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
+ gem_shuffle_tx_one_ring(queue);
+}
+
static void macb_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
@@ -757,8 +849,6 @@ static void macb_mac_link_up(struct phylink_config *config,
ctrl |= MACB_BIT(PAE);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue->tx_head = 0;
- queue->tx_tail = 0;
queue_writel(queue, IER,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
}
@@ -772,8 +862,10 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
- if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
macb_set_tx_clk(bp, speed);
+ gem_shuffle_tx_rings(bp);
+ }
/* Enable Rx and Tx; Enable PTP unicast */
ctrl = macb_readl(bp, NCR);
@@ -2608,6 +2700,14 @@ static void macb_init_tieoff(struct macb *bp)
desc->ctrl = 0;
}
+static void gem_init_rx_ring(struct macb_queue *queue)
+{
+ queue->rx_tail = 0;
+ queue->rx_prepared_head = 0;
+
+ gem_rx_refill(queue);
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2625,10 +2725,7 @@ static void gem_init_rings(struct macb *bp)
queue->tx_head = 0;
queue->tx_tail = 0;
- queue->rx_tail = 0;
- queue->rx_prepared_head = 0;
-
- gem_rx_refill(queue);
+ gem_init_rx_ring(queue);
}
macb_init_tieoff(bp);
@@ -3748,6 +3845,9 @@ static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
struct macb *bp = netdev_priv(netdev);
int ret;
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.location >= bp->max_tuples)
@@ -5430,8 +5530,18 @@ static int __maybe_unused macb_resume(struct device *dev)
rtnl_unlock();
}
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_init_buffers(bp);
+
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
+ if (macb_is_gem(bp))
+ gem_init_rx_ring(queue);
+ else
+ macb_init_rx_ring(queue);
+ }
+
napi_enable(&queue->napi_rx);
napi_enable(&queue->napi_tx);
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index a63bf29c4fa8..f2b09100f710 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -355,8 +355,10 @@ void gem_ptp_remove(struct net_device *ndev)
{
struct macb *bp = netdev_priv(ndev);
- if (bp->ptp_clock)
+ if (bp->ptp_clock) {
ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
gem_ptp_clear_timer(bp);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 26053cc85d1c..62a6df009cda 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -157,6 +157,24 @@ gve_free_pending_packet(struct gve_tx_ring *tx,
}
}
+static void gve_unmap_packet(struct device *dev,
+ struct gve_tx_pending_packet_dqo *pkt)
+{
+ int i;
+
+ if (!pkt->num_bufs)
+ return;
+
+ /* SKB linear portion is guaranteed to be mapped */
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+ }
+ pkt->num_bufs = 0;
+}
+
/* gve_tx_free_desc - Cleans up all pending tx requests and buffers.
*/
static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
@@ -166,21 +184,12 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
for (i = 0; i < tx->dqo.num_pending_packets; i++) {
struct gve_tx_pending_packet_dqo *cur_state =
&tx->dqo.pending_packets[i];
- int j;
-
- for (j = 0; j < cur_state->num_bufs; j++) {
- if (j == 0) {
- dma_unmap_single(tx->dev,
- dma_unmap_addr(cur_state, dma[j]),
- dma_unmap_len(cur_state, len[j]),
- DMA_TO_DEVICE);
- } else {
- dma_unmap_page(tx->dev,
- dma_unmap_addr(cur_state, dma[j]),
- dma_unmap_len(cur_state, len[j]),
- DMA_TO_DEVICE);
- }
- }
+
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, cur_state);
+ else
+ gve_unmap_packet(tx->dev, cur_state);
+
if (cur_state->skb) {
dev_consume_skb_any(cur_state->skb);
cur_state->skb = NULL;
@@ -1039,21 +1048,6 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
}
-static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pkt)
-{
- int i;
-
- /* SKB linear portion is guaranteed to be mapped */
- dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
- dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
- for (i = 1; i < pkt->num_bufs; i++) {
- dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
- dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
- }
- pkt->num_bufs = 0;
-}
-
/* Completion types and expected behavior:
* No Miss compl + Packet compl = Packet completed normally.
* Miss compl + Re-inject compl = Packet completed normally.
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 67d7651b6411..8072aa8f05e3 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2948,8 +2948,6 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
dma_error:
dev_err(&pdev->dev, "TX DMA map failed\n");
buffer_info->dma = 0;
- if (count)
- count--;
while (count--) {
if (i == 0)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 5fe54e9b71e2..4d9dcb0001d2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5633,8 +5633,6 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
dma_error:
dev_err(&pdev->dev, "Tx DMA map failed\n");
buffer_info->dma = 0;
- if (count)
- count--;
while (count--) {
if (i == 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3251ffa7d994..9cf5b6349b0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3821,10 +3821,10 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
cfilter.n_proto = ETH_P_IP;
if (mask.dst_ip[0] & tcf.dst_ip[0])
memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
- ARRAY_SIZE(tcf.dst_ip));
- else if (mask.src_ip[0] & tcf.dst_ip[0])
+ sizeof(cfilter.ip.v4.dst_ip));
+ else if (mask.src_ip[0] & tcf.src_ip[0])
memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
- ARRAY_SIZE(tcf.dst_ip));
+ sizeof(cfilter.ip.v4.src_ip));
break;
case VIRTCHNL_TCP_V6_FLOW:
cfilter.n_proto = ETH_P_IPV6;
@@ -3879,7 +3879,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
/* for ipv6, mask is set for all sixteen bytes (4 words) */
if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
- sizeof(cfilter.ip.v6.src_ip6)))
+ sizeof(cfilter.ip.v6.dst_ip6)))
continue;
if (mask.vlan_id)
if (cfilter.vlan_id != cf->vlan_id)
@@ -3967,10 +3967,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
cfilter->n_proto = ETH_P_IP;
if (mask.dst_ip[0] & tcf.dst_ip[0])
memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
- ARRAY_SIZE(tcf.dst_ip));
- else if (mask.src_ip[0] & tcf.dst_ip[0])
+ sizeof(cfilter->ip.v4.dst_ip));
+ else if (mask.src_ip[0] & tcf.src_ip[0])
memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
- ARRAY_SIZE(tcf.dst_ip));
+ sizeof(cfilter->ip.v4.src_ip));
break;
case VIRTCHNL_TCP_V6_FLOW:
cfilter->n_proto = ETH_P_IPV6;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index dcd4f172ddc8..5f07f37933a0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -774,10 +774,13 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
adapter->num_vlan_filters++;
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
} else if (f->state == IAVF_VLAN_REMOVE) {
- /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
- * We can safely only change the state here.
+ /* Re-add the filter since we cannot tell whether the
+ * pending delete has already been processed by the PF.
+ * A duplicate add is harmless.
*/
- f->state = IAVF_VLAN_ACTIVE;
+ f->state = IAVF_VLAN_ADD;
+ iavf_schedule_aq_request(adapter,
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}
clearout:
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 36b391276187..484e48135e17 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1554,6 +1554,7 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
case ice_aqc_opc_lldp_stop:
case ice_aqc_opc_lldp_start:
case ice_aqc_opc_lldp_filter_ctrl:
+ case ice_aqc_opc_sff_eeprom:
return true;
}
@@ -1579,6 +1580,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
{
struct ice_aq_desc desc_cpy;
bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
int status;
@@ -1588,8 +1590,11 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
memset(&desc_cpy, 0, sizeof(desc_cpy));
if (is_cmd_for_retry) {
- /* All retryable cmds are direct, without buf. */
- WARN_ON(buf);
+ if (buf) {
+ buf_cpy = kmemdup(buf, buf_size, GFP_KERNEL);
+ if (!buf_cpy)
+ return -ENOMEM;
+ }
memcpy(&desc_cpy, desc, sizeof(desc_cpy));
}
@@ -1601,12 +1606,14 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
break;
+ if (buf_cpy)
+ memcpy(buf, buf_cpy, buf_size);
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
-
msleep(ICE_SQ_SEND_DELAY_TIME_MS);
} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
+ kfree(buf_cpy);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 5379fbe06b07..b2183d5670b8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4528,7 +4528,7 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
bool is_sfp = false;
- unsigned int i, j;
+ unsigned int i;
u16 offset = 0;
u8 page = 0;
int status;
@@ -4570,26 +4570,19 @@ ice_get_module_eeprom(struct net_device *netdev,
if (page == 0 || !(data[0x2] & 0x4)) {
u32 copy_len;
- /* If i2c bus is busy due to slow page change or
- * link management access, call can fail. This is normal.
- * So we retry this a few times.
- */
- for (j = 0; j < 4; j++) {
- status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
- !is_sfp, value,
- SFF_READ_BLOCK_SIZE,
- 0, NULL);
- netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n",
- addr, offset, page, is_sfp,
- value[0], value[1], value[2], value[3],
- value[4], value[5], value[6], value[7],
- status);
- if (status) {
- usleep_range(1500, 2500);
- memset(value, 0, SFF_READ_BLOCK_SIZE);
- continue;
- }
- break;
+ status = ice_aq_sff_eeprom(hw, 0, addr, offset, page,
+ !is_sfp, value,
+ SFF_READ_BLOCK_SIZE,
+ 0, NULL);
+ netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%pe)\n",
+ addr, offset, page, is_sfp,
+ value[0], value[1], value[2], value[3],
+ value[4], value[5], value[6], value[7],
+ ERR_PTR(status));
+ if (status) {
+ netdev_err(netdev, "%s: error reading module EEPROM: status %pe\n",
+ __func__, ERR_PTR(status));
+ return status;
}
/* Make sure we have enough room for the new block */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8e0f180ec38e..8683883e23b1 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4920,6 +4920,7 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_dpll_deinit(pf);
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
xa_destroy(&pf->eswitch.reprs);
+ ice_hwmon_exit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5451,8 +5452,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
- ice_hwmon_exit(pf);
-
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
set_bit(ICE_DOWN, pf->state);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 79d5fc5ac4fc..24949a50037e 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -745,6 +745,8 @@ ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
+void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter,
+ u16 queue_id);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 18dad521aefc..6fcf4fd7ee19 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -264,6 +264,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
/* reset next_to_use and next_to_clean */
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+
+ /* Clear any lingering XSK TX timestamp requests */
+ if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+ igc_ptp_clear_xsk_tx_tstamp_queue(adapter, tx_ring->queue_index);
+ }
}
/**
@@ -1704,11 +1711,8 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
- return NETDEV_TX_OK;
- skb->len = 17;
- }
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index a272d1a29ead..9ff73e7532e5 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -587,6 +587,39 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
}
+/**
+ * igc_ptp_clear_xsk_tx_tstamp_queue - Clear pending XSK TX timestamps for a queue
+ * @adapter: Board private structure
+ * @queue_id: TX queue index to clear timestamps for
+ *
+ * Iterates over all TX timestamp registers and releases any pending
+ * timestamp requests associated with the given TX queue. This is
+ * called when an XDP pool is being disabled to ensure no stale
+ * timestamp references remain.
+ */
+void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter, u16 queue_id)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
+
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
+
+ if (tstamp->buffer_type != IGC_TX_BUFFER_TYPE_XSK)
+ continue;
+ if (tstamp->xsk_queue_index != queue_id)
+ continue;
+ if (!tstamp->xsk_tx_buffer)
+ continue;
+
+ igc_ptp_free_tx_buffer(adapter, tstamp);
+ }
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
+}
+
static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 65257107dfc8..708d5dd921ac 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -852,7 +852,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!mac->get_link_status)
goto out;
- if (hw->mac.type == ixgbe_mac_e610_vf) {
+ if (hw->mac.type == ixgbe_mac_e610_vf &&
+ hw->api_version >= ixgbe_mbox_api_16) {
ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 66b5a80c9c28..51e35c4d9ea9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5025,7 +5025,7 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
if (priv->percpu_pools)
numbufs = port->nrxqs * 2;
- if (change_percpu)
+ if (change_percpu && priv->global_tx_fc)
mvpp2_bm_pool_update_priv_fc(priv, false);
for (i = 0; i < numbufs; i++)
@@ -5050,7 +5050,7 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
mvpp2_open(port->dev);
}
- if (change_percpu)
+ if (change_percpu && priv->global_tx_fc)
mvpp2_bm_pool_update_priv_fc(priv, true);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 992fa0b82e8d..d0215f5e9a06 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -1224,6 +1224,9 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
u8 cgx_idx, lmac;
void *cgxd;
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 06f778baaeef..6f8914431de4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -327,10 +327,10 @@ static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_RAS:
- intr_val = nix_event_context->nix_af_rvu_err;
+ intr_val = nix_event_context->nix_af_rvu_ras;
rvu_report_pair_start(fmsg, "NIX_AF_RAS");
devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
- nix_event_context->nix_af_rvu_err);
+ nix_event_context->nix_af_rvu_ras);
devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
if (intr_val & BIT_ULL(34))
devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
@@ -475,7 +475,7 @@ static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
if (blkaddr < 0)
return blkaddr;
- if (nix_event_ctx->nix_af_rvu_int)
+ if (nix_event_ctx->nix_af_rvu_ras)
rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index 38cfe148f4b7..f85e57b07c57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -56,7 +56,7 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
- if (rvu->fwdata->channel_data.valid) {
+ if (rvu->fwdata && rvu->fwdata->channel_data.valid) {
sdp_pf_num[0] = 0;
pfvf = &rvu->pf[sdp_pf_num[0]];
pfvf->sdp_info = &rvu->fwdata->channel_data.info;
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 20cf7ba9d750..da259c4b03fb 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -752,11 +752,9 @@ struct airoha_tx_irq_queue {
struct airoha_qdma *qdma;
struct napi_struct napi;
- u32 *q;
int size;
- int queued;
- u16 head;
+ u32 *q;
};
struct airoha_hw_stats {
@@ -1116,17 +1114,23 @@ static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
}
+static u32 airoha_fe_get_pse_all_rsv(struct airoha_eth *eth)
+{
+ u32 val = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+
+ return FIELD_GET(PSE_ALLRSV_MASK, val);
+}
+
static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
u32 port, u32 queue, u32 val)
{
- u32 orig_val, tmp, all_rsv, fq_limit;
+ u32 orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+ u32 tmp, all_rsv, fq_limit;
airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
/* modify all rsv */
- orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
- tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
- all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
all_rsv += (val - orig_val);
airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
@@ -1166,11 +1170,13 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
[FE_PSE_PORT_GDM4] = 2,
[FE_PSE_PORT_CDM5] = 2,
};
+ u32 all_rsv;
int q;
+ all_rsv = airoha_fe_get_pse_all_rsv(eth);
/* hw misses PPE2 oq rsv */
- airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
- PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
+ all_rsv += PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2];
+ airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
/* CMD1 */
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
@@ -1647,25 +1653,31 @@ static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
+ int id, done = 0, irq_queued;
struct airoha_qdma *qdma;
struct airoha_eth *eth;
- int id, done = 0;
+ u32 status, head;
irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
qdma = irq_q->qdma;
id = irq_q - &qdma->q_tx_irq[0];
eth = qdma->eth;
- while (irq_q->queued > 0 && done < budget) {
- u32 qid, last, val = irq_q->q[irq_q->head];
+ status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ head = head % irq_q->size;
+ irq_queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+ while (irq_queued > 0 && done < budget) {
+ u32 qid, last, val = irq_q->q[head];
struct airoha_queue *q;
if (val == 0xff)
break;
- irq_q->q[irq_q->head] = 0xff; /* mark as done */
- irq_q->head = (irq_q->head + 1) % irq_q->size;
- irq_q->queued--;
+ irq_q->q[head] = 0xff; /* mark as done */
+ head = (head + 1) % irq_q->size;
+ irq_queued--;
done++;
last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
@@ -2015,20 +2027,11 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
if (intr[0] & INT_TX_MASK) {
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
- struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
- u32 status, head;
-
if (!(intr[0] & TX_DONE_INT_MASK(i)))
continue;
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
TX_DONE_INT_MASK(i));
-
- status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
- head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
- irq_q->head = head % irq_q->size;
- irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
-
napi_schedule(&qdma->q_tx_irq[i].napi);
}
}
@@ -2781,7 +2784,6 @@ static void airoha_remove(struct platform_device *pdev)
if (!port)
continue;
- airoha_dev_stop(port->dev);
unregister_netdev(port->dev);
}
free_netdev(eth->napi_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index dbd9482359e1..74f9703013b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -45,7 +45,6 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
"SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
sq->sqn, sq->cc, sq->pc);
sq->cc = 0;
- sq->dma_fifo_cc = 0;
sq->pc = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index a37c8a117d80..2e5ca1cc29bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -274,6 +274,7 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_dwork *dwork;
struct mlx5e_ipsec_limits limits;
u32 rx_mapped_id;
+ u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
};
struct mlx5_accel_pol_xfrm_attrs {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 831d4b17ad07..c48eeb399a42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2449,7 +2449,7 @@ void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
goto out;
peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
- if (peer_priv)
+ if (peer_priv && peer_priv->ipsec)
complete_all(&peer_priv->ipsec->comp);
mlx5_devcom_for_each_peer_end(priv->devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 820debf3fbbf..40fe3d1e2342 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -311,10 +311,11 @@ static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
mlx5e_ipsec_aso_query(sa_entry, data);
}
-static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
- u32 mode_param)
+static void
+mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u32 mode_param,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
{
- struct mlx5_accel_esp_xfrm_attrs attrs = {};
struct mlx5_wqe_aso_ctrl_seg data = {};
if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
@@ -324,18 +325,7 @@ static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
sa_entry->esn_state.overlap = 1;
}
- mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
-
- /* It is safe to execute the modify below unlocked since the only flows
- * that could affect this HW object, are create, destroy and this work.
- *
- * Creation flow can't co-exist with this modify work, the destruction
- * flow would cancel this work, and this work is a single entity that
- * can't conflict with it self.
- */
- spin_unlock_bh(&sa_entry->x->lock);
- mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
- spin_lock_bh(&sa_entry->x->lock);
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, attrs);
data.data_offset_condition_operand =
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
@@ -371,20 +361,18 @@ static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- struct mlx5e_ipsec_aso *aso = ipsec->aso;
bool soft_arm, hard_arm;
u64 hard_cnt;
lockdep_assert_held(&sa_entry->x->lock);
- soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
- hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
+ soft_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, soft_lft_arm);
+ hard_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, hard_lft_arm);
if (!soft_arm && !hard_arm)
/* It is not lifetime event */
return;
- hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
+ hard_cnt = MLX5_GET(ipsec_aso, sa_entry->ctx, remove_flow_pkt_cnt);
if (!hard_cnt || hard_arm) {
/* It is possible to see packet counter equal to zero without
* hard limit event armed. Such situation can be if packet
@@ -454,11 +442,11 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
struct mlx5e_ipsec_work *work =
container_of(_work, struct mlx5e_ipsec_work, work);
struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
+ struct mlx5_accel_esp_xfrm_attrs tmp = {};
struct mlx5_accel_esp_xfrm_attrs *attrs;
- struct mlx5e_ipsec_aso *aso;
+ bool need_modify = false;
int ret;
- aso = sa_entry->ipsec->aso;
attrs = &sa_entry->attrs;
spin_lock_bh(&sa_entry->x->lock);
@@ -466,18 +454,22 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
if (ret)
goto unlock;
+ if (attrs->lft.soft_packet_limit != XFRM_INF)
+ mlx5e_ipsec_handle_limits(sa_entry);
+
if (attrs->replay_esn.trigger &&
- !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
- u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
+ !MLX5_GET(ipsec_aso, sa_entry->ctx, esn_event_arm)) {
+ u32 mode_param = MLX5_GET(ipsec_aso, sa_entry->ctx,
+ mode_parameter);
- mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
+ mlx5e_ipsec_update_esn_state(sa_entry, mode_param, &tmp);
+ need_modify = true;
}
- if (attrs->lft.soft_packet_limit != XFRM_INF)
- mlx5e_ipsec_handle_limits(sa_entry);
-
unlock:
spin_unlock_bh(&sa_entry->x->lock);
+ if (need_modify)
+ mlx5_accel_esp_modify_xfrm(sa_entry, &tmp);
kfree(work);
}
@@ -630,6 +622,8 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
/* We are in atomic context */
udelay(10);
} while (ret && time_is_after_jiffies(expires));
+ if (!ret)
+ memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso));
spin_unlock_bh(&aso->lock);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d8c304427e2a..8c2e1d881a1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -713,24 +713,24 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
-static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+static u32 mlx5_esw_qos_lag_link_speed_get(struct mlx5_core_dev *mdev,
+ bool take_rtnl)
{
struct ethtool_link_ksettings lksettings;
struct net_device *slave, *master;
u32 speed = SPEED_UNKNOWN;
- /* Lock ensures a stable reference to master and slave netdevice
- * while port speed of master is queried.
- */
- ASSERT_RTNL();
-
slave = mlx5_uplink_netdev_get(mdev);
if (!slave)
goto out;
+ if (take_rtnl)
+ rtnl_lock();
master = netdev_master_upper_dev_get(slave);
if (master && !__ethtool_get_link_ksettings(master, &lksettings))
speed = lksettings.base.speed;
+ if (take_rtnl)
+ rtnl_unlock();
out:
mlx5_uplink_netdev_put(mdev, slave);
@@ -738,20 +738,15 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
}
static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
- bool hold_rtnl_lock, struct netlink_ext_ack *extack)
+ bool take_rtnl,
+ struct netlink_ext_ack *extack)
{
int err;
if (!mlx5_lag_is_active(mdev))
goto skip_lag;
- if (hold_rtnl_lock)
- rtnl_lock();
-
- *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
-
- if (hold_rtnl_lock)
- rtnl_unlock();
+ *link_speed_max = mlx5_esw_qos_lag_link_speed_get(mdev, take_rtnl);
if (*link_speed_max != (u32)SPEED_UNKNOWN)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6544546a1153..864e88f05771 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1038,6 +1038,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ esw->esw_funcs.host_funcs_disabled =
+ MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_pf_not_exist);
+
+ kvfree(query_host_out);
+ return 0;
+}
+
static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1049,10 +1068,11 @@ static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
{
- if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS &&
+ mlx5_eswitch_is_funcs_handler(esw->dev)) {
mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
-
- flush_workqueue(esw->work_queue);
+ atomic_inc(&esw->esw_funcs.generation);
+ }
}
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
@@ -1871,6 +1891,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ err = mlx5_esw_host_functions_enabled_query(esw);
+ if (err)
+ goto abort;
+
err = mlx5_esw_vports_init(esw);
if (err)
goto abort;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 48fd0400ffd4..63c2b36ce967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -312,10 +312,13 @@ struct esw_mc_addr { /* SRIOV only */
struct mlx5_host_work {
struct work_struct work;
struct mlx5_eswitch *esw;
+ int work_gen;
};
struct mlx5_esw_functions {
struct mlx5_nb nb;
+ atomic_t generation;
+ bool host_funcs_disabled;
u16 num_vfs;
u16 num_ec_vfs;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7cead1ba0bfa..b122003d8bcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3402,22 +3402,28 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
}
static void
-esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
+esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, int work_gen,
+ const u32 *out)
{
struct devlink *devlink;
bool host_pf_disabled;
u16 new_num_vfs;
+ devlink = priv_to_devlink(esw->dev);
+ devl_lock(devlink);
+
+ /* Stale work from one or more mode changes ago. Bail out. */
+ if (work_gen != atomic_read(&esw->esw_funcs.generation))
+ goto unlock;
+
new_num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_pf_disabled);
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
- return;
+ goto unlock;
- devlink = priv_to_devlink(esw->dev);
- devl_lock(devlink);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -3432,6 +3438,7 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
}
}
esw->esw_funcs.num_vfs = new_num_vfs;
+unlock:
devl_unlock(devlink);
}
@@ -3448,7 +3455,7 @@ static void esw_functions_changed_event_handler(struct work_struct *work)
if (IS_ERR(out))
goto out;
- esw_vfs_changed_event_handler(esw, out);
+ esw_vfs_changed_event_handler(esw, host_work->work_gen, out);
kvfree(out);
out:
kfree(host_work);
@@ -3468,6 +3475,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
host_work->esw = esw;
+ host_work->work_gen = atomic_read(&esw_funcs->generation);
INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
queue_work(esw->work_queue, &host_work->work);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..e07d0a952978 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -778,9 +778,6 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
gc->max_num_cqs = 0;
}
- kfree(hwc->caller_ctx);
- hwc->caller_ctx = NULL;
-
if (hwc->txq)
mana_hwc_destroy_wq(hwc, hwc->txq);
@@ -790,6 +787,9 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
if (hwc->cq)
mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
+ kfree(hwc->caller_ctx);
+ hwc->caller_ctx = NULL;
+
mana_gd_free_res_map(&hwc->inflight_msg_res);
hwc->num_inflight_msg = 0;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 37d4966d16db..d953d6084269 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1368,8 +1368,14 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
ndev = txq->ndev;
apc = netdev_priv(ndev);
+ /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
+ * doorbell can be rung in time for the hardware's requirement
+ * of at least one doorbell ring every 8 wraparounds.
+ */
comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
- CQE_POLLING_BUFFER);
+ min((cq->gdma_cq->queue_size /
+ COMP_ENTRY_SIZE) * 4,
+ CQE_POLLING_BUFFER));
if (comp_read < 1)
return;
@@ -1734,7 +1740,14 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
struct mana_rxq *rxq = cq->rxq;
int comp_read, i;
- comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
+ /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
+ * doorbell can be rung in time for the hardware's requirement
+ * of at least one doorbell ring every 8 wraparounds.
+ */
+ comp_read = mana_gd_poll_cq(cq->gdma_cq, comp,
+ min((cq->gdma_cq->queue_size /
+ COMP_ENTRY_SIZE) * 4,
+ CQE_POLLING_BUFFER));
WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
rxq->xdp_flush = false;
@@ -1779,11 +1792,11 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
cq->work_done_since_doorbell = 0;
napi_complete_done(&cq->napi, w);
- } else if (cq->work_done_since_doorbell >
- cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
+ } else if (cq->work_done_since_doorbell >=
+ (cq->gdma_cq->queue_size / COMP_ENTRY_SIZE) * 4) {
/* MANA hardware requires at least one doorbell ring every 8
* wraparounds of CQ even if there is no need to arm the CQ.
- * This driver rings the doorbell as soon as we have exceeded
+ * This driver rings the doorbell as soon as it has processed
* 4 wraparounds.
*/
mana_gd_ring_cq(gdma_queue, 0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 517e997a585b..7d47c002eaf0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -374,7 +374,6 @@ enum request_irq_err {
REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
- REQ_IRQ_ERR_LPI,
REQ_IRQ_ERR_WOL,
REQ_IRQ_ERR_MAC,
REQ_IRQ_ERR_NO,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 23d9ece46d9c..7b3346c759a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -618,7 +618,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Setup MSI vector offset specific to Intel mGbE controller */
plat->msi_mac_vec = 29;
- plat->msi_lpi_vec = 28;
plat->msi_sfty_ce_vec = 27;
plat->msi_sfty_ue_vec = 26;
plat->msi_rx_base_vec = 0;
@@ -1004,8 +1003,6 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
- if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
- res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
@@ -1087,7 +1084,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
*/
plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
- plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index aced7589d20d..07f003e081cf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -476,13 +476,6 @@ static int loongson_dwmac_dt_config(struct pci_dev *pdev,
res->wol_irq = res->irq;
}
- res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
- if (res->lpi_irq < 0) {
- dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
- ret = -ENODEV;
- goto err_put_node;
- }
-
ret = device_get_phy_mode(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ea135203ff2e..0e85170f6191 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -29,7 +29,6 @@ struct stmmac_resources {
void __iomem *addr;
u8 mac[ETH_ALEN];
int wol_irq;
- int lpi_irq;
int irq;
int sfty_irq;
int sfty_ce_irq;
@@ -314,7 +313,6 @@ struct stmmac_priv {
bool wol_irq_disabled;
int clk_csr;
struct timer_list eee_ctrl_timer;
- int lpi_irq;
int eee_enabled;
int eee_active;
int tx_lpi_timer;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 396216633149..5016b8c39b68 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3580,10 +3580,6 @@ static void stmmac_free_irq(struct net_device *dev,
free_irq(priv->sfty_ce_irq, dev);
fallthrough;
case REQ_IRQ_ERR_SFTY_CE:
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
- free_irq(priv->lpi_irq, dev);
- fallthrough;
- case REQ_IRQ_ERR_LPI:
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
@@ -3642,24 +3638,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
- /* Request the LPI IRQ in case of another line
- * is used for LPI
- */
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
- int_name = priv->int_name_lpi;
- sprintf(int_name, "%s:%s", dev->name, "lpi");
- ret = request_irq(priv->lpi_irq,
- stmmac_mac_interrupt,
- 0, int_name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: alloc lpi MSI %d (error: %d)\n",
- __func__, priv->lpi_irq, ret);
- irq_err = REQ_IRQ_ERR_LPI;
- goto irq_error;
- }
- }
-
/* Request the common Safety Feature Correctible/Uncorrectible
* Error line in case of another line is used
*/
@@ -3800,19 +3778,6 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
- /* Request the IRQ lines */
- if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
- ret = request_irq(priv->lpi_irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
- __func__, priv->lpi_irq, ret);
- irq_err = REQ_IRQ_ERR_LPI;
- goto irq_error;
- }
- }
-
/* Request the common Safety Feature Correctible/Uncorrectible
* Error line in case of another line is used
*/
@@ -7576,7 +7541,6 @@ int stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
- priv->lpi_irq = res->lpi_irq;
priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8fd868b671a2..b79826a61ec4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -733,14 +733,6 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
stmmac_res->wol_irq = stmmac_res->irq;
}
- stmmac_res->lpi_irq =
- platform_get_irq_byname_optional(pdev, "eth_lpi");
- if (stmmac_res->lpi_irq < 0) {
- if (stmmac_res->lpi_irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
- }
-
stmmac_res->sfty_irq =
platform_get_irq_byname_optional(pdev, "sfty");
if (stmmac_res->sfty_irq < 0) {
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index 617333343ca0..f8f83fe424e5 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -344,6 +344,7 @@ static int mctp_i2c_recv(struct mctp_i2c_dev *midev)
} else {
status = NET_RX_DROP;
spin_unlock_irqrestore(&midev->lock, flags);
+ kfree_skb(skb);
}
if (status == NET_RX_SUCCESS) {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 834624a61060..7f995d0e51f7 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1662,8 +1662,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
goto error;
phy_resume(phydev);
- if (!phydev->is_on_sfp_module)
- phy_led_triggers_register(phydev);
/**
* If the external phy used by current mac interface is managed by
@@ -2033,9 +2031,6 @@ void phy_detach(struct phy_device *phydev)
}
phydev->phylink = NULL;
- if (!phydev->is_on_sfp_module)
- phy_led_triggers_unregister(phydev);
-
if (phydev->mdio.dev.driver)
module_put(phydev->mdio.dev.driver->owner);
@@ -3660,17 +3655,28 @@ static int phy_probe(struct device *dev)
/* Set the state to READY by default */
phydev->state = PHY_READY;
+ /* Register the PHY LED triggers */
+ if (!phydev->is_on_sfp_module)
+ phy_led_triggers_register(phydev);
+
/* Get the LEDs from the device tree, and instantiate standard
* LEDs for them.
*/
if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
- !phy_driver_is_genphy_10g(phydev))
+ !phy_driver_is_genphy_10g(phydev)) {
err = of_phy_leds(phydev);
+ if (err)
+ goto out;
+ }
+
+ return 0;
out:
+ if (!phydev->is_on_sfp_module)
+ phy_led_triggers_unregister(phydev);
+
/* Re-assert the reset signal on error */
- if (err)
- phy_device_reset(phydev, 1);
+ phy_device_reset(phydev, 1);
return err;
}
@@ -3685,6 +3691,9 @@ static int phy_remove(struct device *dev)
!phy_driver_is_genphy_10g(phydev))
phy_leds_unregister(phydev);
+ if (!phydev->is_on_sfp_module)
+ phy_led_triggers_unregister(phydev);
+
phydev->state = PHY_DOWN;
sfp_bus_del_upstream(phydev->sfp_bus);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index cae748b76223..dd8d37b44aac 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -360,6 +360,12 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
sfp->state_ignore_mask |= SFP_F_TX_FAULT;
}
+static void sfp_fixup_ignore_tx_fault_and_los(struct sfp *sfp)
+{
+ sfp_fixup_ignore_tx_fault(sfp);
+ sfp_fixup_ignore_los(sfp);
+}
+
static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask)
{
sfp->state_hw_mask &= ~mask;
@@ -523,7 +529,7 @@ static const struct sfp_quirk sfp_quirks[] = {
// Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
// their EEPROM
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
- sfp_fixup_ignore_tx_fault),
+ sfp_fixup_ignore_tx_fault_and_los),
// Lantech 8330-262D-E and 8330-265D can operate at 2500base-X, but
// incorrectly report 2500MBd NRZ in their EEPROM.
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 9201ee10a13f..d316aa66dbc2 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1400,14 +1400,14 @@ static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
				  SFR_MEDIUM_STATUS_MODE, 2, &reg16);
- aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0,
- WOL_CFG_SIZE, &wol_cfg);
- aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ aqc111_write_cmd_nopm(dev, AQ_WOL_CFG, 0, 0,
+ WOL_CFG_SIZE, &wol_cfg);
+ aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
} else {
aqc111_data->phy_cfg |= AQ_LOW_POWER;
- aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
- &aqc111_data->phy_cfg);
+ aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
/* Disable RX path */
aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 5c89e03f93d6..c00699cd3e35 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1657,6 +1657,7 @@ int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp16 *ndp16;
int ret = -EINVAL;
+ size_t ndp_len;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
@@ -1676,8 +1677,8 @@ int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* we process NDP entries except for the last one */
- if ((sizeof(struct usb_cdc_ncm_ndp16) +
- ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
+ ndp_len = struct_size_t(struct usb_cdc_ncm_ndp16, dpe16, ret);
+ if (ndpoffset + ndp_len > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
@@ -1693,6 +1694,7 @@ int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp32 *ndp32;
int ret = -EINVAL;
+ size_t ndp_len;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
@@ -1712,8 +1714,8 @@ int cdc_ncm_rx_verify_ndp32(struct sk_buff *skb_in, int ndpoffset)
sizeof(struct usb_cdc_ncm_dpe32));
ret--; /* we process NDP entries except for the last one */
- if ((sizeof(struct usb_cdc_ncm_ndp32) +
- ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) {
+ ndp_len = struct_size_t(struct usb_cdc_ncm_ndp32, dpe32, ret);
+ if (ndpoffset + ndp_len > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 4bb53919a1d7..8647980df005 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2667,6 +2667,10 @@ static void lan78xx_init_ltm(struct lan78xx_net *dev)
u32 buf;
u32 regs[6] = { 0 };
+ /* LAN7850 is USB 2.0 and does not support LTM */
+ if (dev->chipid == ID_REV_CHIP_ID_7850_)
+ return;
+
ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
if (buf & USB_CFG1_LTM_ENABLE_) {
u8 temp[2];
@@ -3537,6 +3541,7 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
*/
if (!(dev->net->features & NETIF_F_RXCSUM) ||
unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+ unlikely(rx_cmd_a & RX_CMD_A_CSE_MASK_) ||
((rx_cmd_a & RX_CMD_A_FVTG_) &&
!(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
skb->ip_summed = CHECKSUM_NONE;
@@ -3609,7 +3614,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
return 0;
}
- if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+ if (unlikely(rx_cmd_a & RX_CMD_A_RED_) &&
+ (rx_cmd_a & RX_CMD_A_RX_HARD_ERRS_MASK_)) {
netif_dbg(dev, rx_err, dev->net,
"Error rx_cmd_a=0x%08x", rx_cmd_a);
} else {
@@ -3884,7 +3890,7 @@ static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
}
tx_data += len;
- entry->length += len;
+ entry->length += max_t(unsigned int, len, ETH_ZLEN);
entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
dev_kfree_skb_any(skb);
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
index 968e5e5faee0..17a934acff3d 100644
--- a/drivers/net/usb/lan78xx.h
+++ b/drivers/net/usb/lan78xx.h
@@ -74,6 +74,9 @@
#define RX_CMD_A_ICSM_ (0x00004000)
#define RX_CMD_A_LEN_MASK_ (0x00003FFF)
+#define RX_CMD_A_RX_HARD_ERRS_MASK_ \
+ (RX_CMD_A_RX_ERRS_MASK_ & ~RX_CMD_A_CSE_MASK_)
+
/* Rx Command B */
#define RX_CMD_B_CSUM_SHIFT_ (16)
#define RX_CMD_B_CSUM_MASK_ (0xFFFF0000)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index bd51435ed418..fd8d7cb1870a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -928,7 +928,7 @@ static int qmi_wwan_resume(struct usb_interface *intf)
static const struct driver_info qmi_wwan_info = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .flags = FLAG_WWAN | FLAG_NOMAXMTU | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
@@ -937,7 +937,7 @@ static const struct driver_info qmi_wwan_info = {
static const struct driver_info qmi_wwan_info_quirk_dtr = {
.description = "WWAN/QMI device",
- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .flags = FLAG_WWAN | FLAG_NOMAXMTU | FLAG_SEND_ZLP,
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f4a05737abf7..30585ef53ae3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1797,11 +1797,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_NOARP) != 0)
net->flags |= IFF_NOARP;
- if (net->max_mtu > (dev->hard_mtu - net->hard_header_len))
+ if ((dev->driver_info->flags & FLAG_NOMAXMTU) == 0 &&
+ net->max_mtu > (dev->hard_mtu - net->hard_header_len))
net->max_mtu = dev->hard_mtu - net->hard_header_len;
- if (net->mtu > net->max_mtu)
- net->mtu = net->max_mtu;
+ if (net->mtu > (dev->hard_mtu - net->hard_header_len))
+ net->mtu = dev->hard_mtu - net->hard_header_len;
} else if (!info->in || !info->out)
status = usbnet_get_endpoints (dev, udev);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 78e8b5aecec0..91b9501c6d8c 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -881,8 +881,8 @@ static void lbs_free_adapter(struct lbs_private *priv)
{
lbs_free_cmd_buffer(priv);
kfifo_free(&priv->event_fifo);
- del_timer(&priv->command_timer);
- del_timer(&priv->tx_lockup_timer);
+ timer_delete_sync(&priv->command_timer);
+ timer_delete_sync(&priv->tx_lockup_timer);
del_timer(&priv->auto_deepsleep_timer);
}
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f251627c24c6..3c0f8f3ba266 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -210,7 +210,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (skb_headroom(skb) < (total_len - skb->len) &&
pskb_expand_head(skb, (total_len - skb->len), 0, GFP_ATOMIC)) {
wl1271_free_tx_id(wl, id);
- return -EAGAIN;
+ return -ENOMEM;
}
desc = skb_push(skb, total_len - skb->len);
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 6a5ce8ff91f0..b3d34433bd14 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -47,8 +47,8 @@ static int nxp_nci_i2c_set_mode(void *phy_id,
{
struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id;
- gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
- gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
+ gpiod_set_value_cansleep(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
+ gpiod_set_value_cansleep(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
usleep_range(10000, 15000);
if (mode == NXP_NCI_MODE_COLD)
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2237715e42eb..1d3c6c7a54dc 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -489,14 +489,15 @@ EXPORT_SYMBOL_GPL(nd_synchronize);
static void nd_async_device_register(void *d, async_cookie_t cookie)
{
struct device *dev = d;
+ struct device *parent = dev->parent;
if (device_add(dev) != 0) {
dev_err(dev, "%s: failed\n", __func__);
put_device(dev);
}
put_device(dev);
- if (dev->parent)
- put_device(dev->parent);
+ if (parent)
+ put_device(parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b31a2dad361d..6bd02c911650 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -343,7 +343,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
- for (i = 1; i <= dev->online_queues; i++)
+ for (i = 1; i < dev->online_queues; i++)
nvme_dbbuf_free(&dev->queues[i]);
}
}
@@ -1201,14 +1201,16 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+ int irq;
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
- disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ irq = pci_irq_vector(pdev, nvmeq->cq_vector);
+ disable_irq(irq);
spin_lock(&nvmeq->cq_poll_lock);
nvme_poll_cq(nvmeq, NULL);
spin_unlock(&nvmeq->cq_poll_lock);
- enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ enable_irq(irq);
}
static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 357a46fdffed..6e8eedb8d521 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -347,6 +347,7 @@ static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
switch (dev->cpu_id) {
case AMD_CPU_ID_PCO:
case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_VG:
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
dev->num_ips = 12;
@@ -765,6 +766,7 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
case AMD_CPU_ID_PCO:
return MSG_OS_HINT_PCO;
case AMD_CPU_ID_RN:
+ case AMD_CPU_ID_VG:
case AMD_CPU_ID_YC:
case AMD_CPU_ID_CB:
case AMD_CPU_ID_PS:
@@ -977,6 +979,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_VG) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
{ }
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index f1166d15c856..17bc0994f376 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -62,6 +62,7 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define AMD_CPU_ID_RN 0x1630
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
+#define AMD_CPU_ID_VG 0x1645
#define AMD_CPU_ID_YC 0x14B5
#define AMD_CPU_ID_CB 0x14D8
#define AMD_CPU_ID_PS 0x14E8
diff --git a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
index f346aad8e9d8..af4d1920d488 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
@@ -94,8 +94,11 @@ int hp_alloc_enumeration_data(void)
bioscfg_drv.enumeration_instances_count =
hp_get_instance_count(HP_WMI_BIOS_ENUMERATION_GUID);
- bioscfg_drv.enumeration_data = kcalloc(bioscfg_drv.enumeration_instances_count,
- sizeof(*bioscfg_drv.enumeration_data), GFP_KERNEL);
+ if (!bioscfg_drv.enumeration_instances_count)
+ return -EINVAL;
+ bioscfg_drv.enumeration_data = kvcalloc(bioscfg_drv.enumeration_instances_count,
+ sizeof(*bioscfg_drv.enumeration_data), GFP_KERNEL);
+
if (!bioscfg_drv.enumeration_data) {
bioscfg_drv.enumeration_instances_count = 0;
return -ENOMEM;
@@ -444,6 +447,6 @@ void hp_exit_enumeration_attributes(void)
}
bioscfg_drv.enumeration_instances_count = 0;
- kfree(bioscfg_drv.enumeration_data);
+ kvfree(bioscfg_drv.enumeration_data);
bioscfg_drv.enumeration_data = NULL;
}
diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
index d2f0233cb620..20130549a7a5 100644
--- a/drivers/pmdomain/bcm/bcm2835-power.c
+++ b/drivers/pmdomain/bcm/bcm2835-power.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mfd/bcm2835-pm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -152,7 +153,6 @@ struct bcm2835_power {
static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable)
{
void __iomem *base = power->asb;
- u64 start;
u32 val;
switch (reg) {
@@ -165,8 +165,6 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
break;
}
- start = ktime_get_ns();
-
/* Enable the module's async AXI bridges. */
if (enable) {
val = readl(base + reg) & ~ASB_REQ_STOP;
@@ -175,11 +173,9 @@ static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable
}
writel(PM_PASSWORD | val, base + reg);
- while (!!(readl(base + reg) & ASB_ACK) == enable) {
- cpu_relax();
- if (ktime_get_ns() - start >= 1000)
- return -ETIMEDOUT;
- }
+ if (readl_poll_timeout_atomic(base + reg, val,
+ !!(val & ASB_ACK) != enable, 0, 5))
+ return -ETIMEDOUT;
return 0;
}
@@ -580,11 +576,11 @@ static int bcm2835_reset_status(struct reset_controller_dev *rcdev,
switch (id) {
case BCM2835_RESET_V3D:
- return !PM_READ(PM_GRAFX & PM_V3DRSTN);
+ return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
case BCM2835_RESET_H264:
- return !PM_READ(PM_IMAGE & PM_H264RSTN);
+ return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
case BCM2835_RESET_ISP:
- return !PM_READ(PM_IMAGE & PM_ISPRSTN);
+ return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
default:
return -EINVAL;
}
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 1ffa145319f2..2a0fac873f9c 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -965,7 +965,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
if (pca9450->irq) {
ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL,
pca9450_irq_handler,
- (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ (IRQF_TRIGGER_LOW | IRQF_ONESHOT),
"pca9450-irq", pca9450);
if (ret != 0) {
dev_err(pca9450->dev, "Failed to request IRQ: %d\n",
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index ae20d2221c8e..fcd2665f7abb 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -1544,12 +1544,51 @@ static const struct of_device_id mtk_scp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
+static int __maybe_unused scp_suspend(struct device *dev)
+{
+ struct mtk_scp *scp = dev_get_drvdata(dev);
+ struct rproc *rproc = scp->rproc;
+
+ /*
+ * Only unprepare if the SCP is running and holding the clock.
+ *
+ * Note: `scp_ops` doesn't implement .attach() callback, hence
+ * `rproc->state` can never be RPROC_ATTACHED. Otherwise, it
+ * should also be checked here.
+ */
+ if (rproc->state == RPROC_RUNNING)
+ clk_unprepare(scp->clk);
+ return 0;
+}
+
+static int __maybe_unused scp_resume(struct device *dev)
+{
+ struct mtk_scp *scp = dev_get_drvdata(dev);
+ struct rproc *rproc = scp->rproc;
+
+ /*
+ * Only prepare if the SCP was running and holding the clock.
+ *
+ * Note: `scp_ops` doesn't implement .attach() callback, hence
+ * `rproc->state` can never be RPROC_ATTACHED. Otherwise, it
+ * should also be checked here.
+ */
+ if (rproc->state == RPROC_RUNNING)
+ return clk_prepare(scp->clk);
+ return 0;
+}
+
+static const struct dev_pm_ops scp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(scp_suspend, scp_resume)
+};
+
static struct platform_driver mtk_scp_driver = {
.probe = scp_probe,
.remove_new = scp_remove,
.driver = {
.name = "mtk-scp",
.of_match_table = mtk_scp_of_match,
+ .pm = &scp_pm_ops,
},
};
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c24e4a882873..db33a41051a3 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -203,7 +203,7 @@ static const struct qmi_elem_info ssctl_shutdown_resp_ei[] = {
};
struct ssctl_subsys_event_req {
- u8 subsys_name_len;
+ u32 subsys_name_len;
char subsys_name[SSCTL_SUBSYS_NAME_LENGTH];
u32 event;
u8 evt_driven_valid;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index b7fbfd9aa908..5ff386fabdc9 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6145,6 +6145,7 @@ static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busi
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
char *sec_busid)
{
+ struct dasd_eckd_private *prim_priv, *sec_priv;
struct dasd_device *primary, *secondary;
struct dasd_copy_relation *copy;
struct dasd_block *block;
@@ -6165,6 +6166,9 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
if (!secondary)
return DASD_COPYPAIRSWAP_SECONDARY;
+ prim_priv = primary->private;
+ sec_priv = secondary->private;
+
/*
* usually the device should be quiesced for swap
* for paranoia stop device and requeue requests again
@@ -6192,6 +6196,18 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
dev_name(&secondary->cdev->dev), rc);
}
+ if (primary->stopped & DASD_STOPPED_QUIESCE) {
+ dasd_device_set_stop_bits(secondary, DASD_STOPPED_QUIESCE);
+ dasd_device_remove_stop_bits(primary, DASD_STOPPED_QUIESCE);
+ }
+
+ /*
+ * The secondary device never got through format detection, but since it
+ * is a copy of the primary device, the format is exactly the same;
+ * therefore, the detected layout can simply be copied.
+ */
+ sec_priv->uses_cdl = prim_priv->uses_cdl;
+
/* re-enable device */
dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 43a27cb3db84..9dd4d01e1203 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1665,11 +1665,13 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
memset(ci, 0, sizeof(*ci));
- /* get first info from zcrypt device driver about this apqn */
- rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
- if (rc)
- return rc;
- ci->hwtype = devstat.hwtype;
+ /* if specific domain given, fetch status and hw info for this apqn */
+ if (domain != AUTOSEL_DOM) {
+ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
+ if (rc)
+ return rc;
+ ci->hwtype = devstat.hwtype;
+ }
/* prep page for rule array and var array use */
pg = (u8 *)__get_free_page(GFP_KERNEL);
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 64df7d2f6266..63b793fc97d8 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -85,8 +85,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
memset(&ci, 0, sizeof(ci));
- if (ap_domain_index >= 0)
- cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
+ cca_get_info(ac->id, AUTOSEL_DOM, &ci, zc->online);
return sysfs_emit(buf, "%s\n", ci.serial);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 010479a354ee..3311f9b9eca6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -46,6 +46,13 @@
#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
#define HISI_SAS_FIFO_DATA_DW_SIZE 32
+#define HISI_SAS_REG_MEM_SIZE 4
+#define HISI_SAS_MAX_CDB_LEN 16
+#define HISI_SAS_BLK_QUEUE_DEPTH 64
+
+#define BYTE_TO_DW 4
+#define BYTE_TO_DDW 8
+
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -92,6 +99,8 @@
#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ)
#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
+#define HISI_SAS_DELAY_FOR_PHY_DISABLE 100
+#define NAME_BUF_SIZE 256
struct hisi_hba;
@@ -167,6 +176,8 @@ struct hisi_sas_debugfs_fifo {
u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE];
};
+#define FRAME_RCVD_BUF 32
+#define SAS_PHY_RESV_SIZE 2
struct hisi_sas_phy {
struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
@@ -178,10 +189,10 @@ struct hisi_sas_phy {
spinlock_t lock;
u64 port_id; /* from hw */
u64 frame_rcvd_size;
- u8 frame_rcvd[32];
+ u8 frame_rcvd[FRAME_RCVD_BUF];
u8 phy_attached;
u8 in_reset;
- u8 reserved[2];
+ u8 reserved[SAS_PHY_RESV_SIZE];
u32 phy_type;
u32 code_violation_err_count;
enum sas_linkrate minimum_linkrate;
@@ -348,6 +359,7 @@ struct hisi_sas_hw {
};
#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+#define HISI_SAS_DEFAULT_DEBUGFS_DUMP 1
struct hisi_sas_debugfs_cq {
struct hisi_sas_cq *cq;
@@ -527,12 +539,13 @@ struct hisi_sas_cmd_hdr {
__le64 dif_prd_table_addr;
};
+#define ITCT_RESV_DDW 12
struct hisi_sas_itct {
__le64 qw0;
__le64 sas_addr;
__le64 qw2;
__le64 qw3;
- __le64 qw4_15[12];
+ __le64 qw4_15[ITCT_RESV_DDW];
};
struct hisi_sas_iost {
@@ -542,22 +555,26 @@ struct hisi_sas_iost {
__le64 qw3;
};
+#define ERROR_RECORD_BUF_DW 4
struct hisi_sas_err_record {
- u32 data[4];
+ u32 data[ERROR_RECORD_BUF_DW];
};
+#define FIS_RESV_DW 3
struct hisi_sas_initial_fis {
struct hisi_sas_err_record err_record;
struct dev_to_host_fis fis;
- u32 rsvd[3];
+ u32 rsvd[FIS_RESV_DW];
};
+#define BREAKPOINT_DATA_SIZE 128
struct hisi_sas_breakpoint {
- u8 data[128];
+ u8 data[BREAKPOINT_DATA_SIZE];
};
+#define BREAKPOINT_TAG_NUM 32
struct hisi_sas_sata_breakpoint {
- struct hisi_sas_breakpoint tag[32];
+ struct hisi_sas_breakpoint tag[BREAKPOINT_TAG_NUM];
};
struct hisi_sas_sge {
@@ -568,13 +585,15 @@ struct hisi_sas_sge {
__le32 data_off;
};
+#define SMP_CMD_TABLE_SIZE 44
struct hisi_sas_command_table_smp {
- u8 bytes[44];
+ u8 bytes[SMP_CMD_TABLE_SIZE];
};
+#define DUMMY_BUF_SIZE 12
struct hisi_sas_command_table_stp {
struct host_to_dev_fis command_fis;
- u8 dummy[12];
+ u8 dummy[DUMMY_BUF_SIZE];
u8 atapi_cdb[ATAPI_CDB_LEN];
};
@@ -588,12 +607,13 @@ struct hisi_sas_sge_dif_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT];
} __aligned(16);
+#define PROT_BUF_SIZE 7
struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
union {
struct {
struct ssp_command_iu task;
- u32 prot[7];
+ u32 prot[PROT_BUF_SIZE];
};
struct ssp_tmf_iu ssp_task;
struct xfer_rdy_iu xfer_rdy;
@@ -607,9 +627,10 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_stp stp;
} __aligned(16);
+#define IU_BUF_SIZE 1024
struct hisi_sas_status_buffer {
struct hisi_sas_err_record err;
- u8 iu[1024];
+ u8 iu[IU_BUF_SIZE];
} __aligned(16);
struct hisi_sas_slot_buf_table {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d9500b730690..236e23620f21 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -7,6 +7,16 @@
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
+#define LINK_RATE_BIT_MASK 2
+#define FIS_BUF_SIZE 20
+#define WAIT_CMD_COMPLETE_DELAY 100
+#define WAIT_CMD_COMPLETE_TMROUT 5000
+#define DELAY_FOR_LINK_READY 2000
+#define BLK_CNT_OPTIMIZE_MARK 64
+#define HZ_TO_MHZ 1000000
+#define DELAY_FOR_SOFTRESET_MAX 1000
+#define DELAY_FOR_SOFTRESET_MIN 900
+
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
@@ -127,7 +137,7 @@ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
max -= SAS_LINK_RATE_1_5_GBPS;
for (i = 0; i <= max; i++)
- rate |= 1 << (i * 2);
+ rate |= 1 << (i * LINK_RATE_BIT_MASK);
return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
@@ -877,7 +887,7 @@ int hisi_sas_device_configure(struct scsi_device *sdev,
if (ret)
return ret;
if (!dev_is_sata(dev))
- sas_change_queue_depth(sdev, 64);
+ sas_change_queue_depth(sdev, HISI_SAS_BLK_QUEUE_DEPTH);
return 0;
}
@@ -1239,7 +1249,7 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
sas_phy->phy->minimum_linkrate = min;
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
@@ -1269,7 +1279,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_LINK_RESET:
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
break;
@@ -1324,7 +1334,7 @@ static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
- u8 fis[20] = {0};
+ u8 fis[FIS_BUF_SIZE] = {0};
struct ata_port *ap = device->sata_dev.ap;
struct ata_link *link;
int rc = TMF_RESP_FUNC_FAILED;
@@ -1341,6 +1351,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
}
if (rc == TMF_RESP_FUNC_COMPLETE) {
+ usleep_range(DELAY_FOR_SOFTRESET_MIN, DELAY_FOR_SOFTRESET_MAX);
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
@@ -1459,7 +1470,7 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
struct ata_link *link;
- u8 fis[20] = {0};
+ u8 fis[FIS_BUF_SIZE] = {0};
int i;
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1526,7 +1537,9 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
- hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
+ hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba,
+ WAIT_CMD_COMPLETE_DELAY,
+ WAIT_CMD_COMPLETE_TMROUT);
/*
* hisi_hba->timer is only used for v1/v2 hw, and check hw->sht
@@ -1827,7 +1840,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
- msleep(2000);
+ msleep(DELAY_FOR_LINK_READY);
}
return rc;
@@ -2242,12 +2255,14 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
goto err_out;
/* roundup to avoid overly large block size */
- max_command_entries_ru = roundup(max_command_entries, 64);
+ max_command_entries_ru = roundup(max_command_entries,
+ BLK_CNT_OPTIMIZE_MARK);
if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
else
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
- sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
+
+ sz_slot_buf_ru = roundup(sz_slot_buf_ru, BLK_CNT_OPTIMIZE_MARK);
s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
slots_per_blk = s / sz_slot_buf_ru;
@@ -2412,7 +2427,8 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
if (IS_ERR(refclk))
dev_dbg(dev, "no ref clk property\n");
else
- hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+ hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) /
+ HZ_TO_MHZ;
if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
dev_err(dev, "could not get property phy-count\n");
@@ -2528,8 +2544,8 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->transportt = hisi_sas_stt;
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
- shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_channel = 0;
+ shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = HISI_SAS_MAX_COMMANDS;
shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2b04556681a1..e958b588d078 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -465,6 +465,12 @@
#define ITCT_HDR_RTOLT_OFF 48
#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+/*debugfs*/
+#define TWO_PARA_PER_LINE 2
+#define FOUR_PARA_PER_LINE 4
+#define DUMP_BUF_SIZE 8
+#define BIST_BUF_SIZE 16
+
struct hisi_sas_protect_iu_v3_hw {
u32 dw0;
u32 lbrtcv;
@@ -535,6 +541,43 @@ struct hisi_sas_err_record_v3 {
#define BASE_VECTORS_V3_HW 16
#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1)
+#define IRQ_PHY_UP_DOWN_INDEX 1
+#define IRQ_CHL_INDEX 2
+#define IRQ_AXI_INDEX 11
+
+#define DELAY_FOR_RESET_HW 100
+#define HDR_SG_MOD 0x2
+#define LUN_SIZE 8
+#define ATTR_PRIO_REGION 9
+#define CDB_REGION 12
+#define PRIO_OFF 3
+#define TMF_REGION 10
+#define TAG_MSB 12
+#define TAG_LSB 13
+#define SMP_FRAME_TYPE 2
+#define SMP_CRC_SIZE 4
+#define HDR_TAG_OFF 3
+#define HOST_NO_OFF 6
+#define PHY_NO_OFF 7
+#define IDENTIFY_REG_READ 6
+#define LINK_RESET_TIMEOUT_OFF 4
+#define DECIMALISM_FLAG 10
+#define WAIT_RETRY 100
+#define WAIT_TMROUT 5000
+
+#define ID_DWORD0_INDEX 0
+#define ID_DWORD1_INDEX 1
+#define ID_DWORD2_INDEX 2
+#define ID_DWORD3_INDEX 3
+#define ID_DWORD4_INDEX 4
+#define ID_DWORD5_INDEX 5
+#define TICKS_BIT_INDEX 24
+#define COUNT_BIT_INDEX 8
+
+#define PORT_REG_LENGTH 0x100
+#define GLOBAL_REG_LENGTH 0x800
+#define AXI_REG_LENGTH 0x61
+#define RAS_REG_LENGTH 0x10
#define CHNL_INT_STS_MSK 0xeeeeeeee
#define CHNL_INT_STS_PHY_MSK 0xe
@@ -807,17 +850,17 @@ static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
identify_buffer = (u32 *)(&identify_frame);
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
- __swab32(identify_buffer[0]));
+ __swab32(identify_buffer[ID_DWORD0_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
- __swab32(identify_buffer[1]));
+ __swab32(identify_buffer[ID_DWORD1_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
- __swab32(identify_buffer[2]));
+ __swab32(identify_buffer[ID_DWORD2_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
- __swab32(identify_buffer[3]));
+ __swab32(identify_buffer[ID_DWORD3_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
- __swab32(identify_buffer[4]));
+ __swab32(identify_buffer[ID_DWORD4_INDEX]));
hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
- __swab32(identify_buffer[5]));
+ __swab32(identify_buffer[ID_DWORD5_INDEX]));
}
static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
@@ -937,7 +980,7 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
/* Disable all of the PHYs */
hisi_sas_stop_phys(hisi_hba);
- udelay(50);
+ udelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
/* Ensure axi bus idle */
ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
@@ -977,7 +1020,7 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return rc;
}
- msleep(100);
+ msleep(DELAY_FOR_RESET_HW);
init_reg_v3_hw(hisi_hba);
if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
@@ -1026,7 +1069,7 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
- mdelay(50);
+ mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
state = hisi_sas_read32(hisi_hba, PHY_STATE);
if (state & BIT(phy_no)) {
@@ -1062,7 +1105,7 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
txid_auto | TX_HARDRST_MSK);
}
- msleep(100);
+ msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE);
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
@@ -1107,7 +1150,8 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
for (i = 0; i < hisi_hba->n_phy; i++)
if (phy_state & BIT(i))
- if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ if (((phy_port_num_ma >> (i * HISI_SAS_REG_MEM_SIZE)) & 0xf) ==
+ port_id)
bitmap |= BIT(i);
return bitmap;
@@ -1305,9 +1349,9 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
- + 3) / 4) << CMD_HDR_CFL_OFF) |
- ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
- (2 << CMD_HDR_SG_MOD_OFF);
+ + 3) / BYTE_TO_DW) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) |
+ (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF);
hdr->dw2 = cpu_to_le32(dw2);
hdr->transfer_tags = cpu_to_le32(slot->idx);
@@ -1327,18 +1371,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr);
- memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE);
if (!tmf) {
- buf_cmd[9] = ssp_task->task_attr;
- memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ buf_cmd[ATTR_PRIO_REGION] = ssp_task->task_attr;
+ memcpy(buf_cmd + CDB_REGION, scsi_cmnd->cmnd,
+ scsi_cmnd->cmd_len);
} else {
- buf_cmd[10] = tmf->tmf;
+ buf_cmd[TMF_REGION] = tmf->tmf;
switch (tmf->tmf) {
case TMF_ABORT_TASK:
case TMF_QUERY_TASK:
- buf_cmd[12] =
+ buf_cmd[TAG_MSB] =
(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
- buf_cmd[13] =
+ buf_cmd[TAG_LSB] =
tmf->tag_of_task_to_be_managed & 0xff;
break;
default:
@@ -1371,7 +1416,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
unsigned int interval = scsi_prot_interval(scsi_cmnd);
unsigned int ilog2_interval = ilog2(interval);
- len = (task->total_xfer_len >> ilog2_interval) * 8;
+ len = (task->total_xfer_len >> ilog2_interval) *
+ BYTE_TO_DDW;
}
}
@@ -1391,6 +1437,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev = device->lldd_dev;
dma_addr_t req_dma_addr;
unsigned int req_len;
+ u32 cfl;
/* req */
sg_req = &task->smp_task.smp_req;
@@ -1401,7 +1448,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
/* dw0 */
hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
(1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
- (2 << CMD_HDR_CMD_OFF)); /* smp */
+ (SMP_FRAME_TYPE << CMD_HDR_CMD_OFF)); /* smp */
/* map itct entry */
hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
@@ -1409,8 +1456,9 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
(DIR_NO_DATA << CMD_HDR_DIR_OFF));
/* dw2 */
- hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
- (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ cfl = (req_len - SMP_CRC_SIZE) / BYTE_TO_DW;
+ hdr->dw2 = cpu_to_le32((cfl << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / BYTE_TO_DW <<
CMD_HDR_MRFL_OFF));
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
@@ -1477,12 +1525,13 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct ata_queued_cmd *qc = task->uldd_task;
hdr_tag = qc->tag;
- task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ task->ata_task.fis.sector_count |=
+ (u8)(hdr_tag << HDR_TAG_OFF);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
- dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
- 2 << CMD_HDR_SG_MOD_OFF;
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_CFL_OFF |
+ HDR_SG_MOD << CMD_HDR_SG_MOD_OFF;
hdr->dw2 = cpu_to_le32(dw2);
/* dw3 */
@@ -1542,9 +1591,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
- port_id = (port_id >> (4 * phy_no)) & 0xf;
+ port_id = (port_id >> (HISI_SAS_REG_MEM_SIZE * phy_no)) & 0xf;
link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
- link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ link_rate = (link_rate >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf;
if (port_id == 0xf) {
dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
@@ -1577,8 +1626,8 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
sas_phy->oob_mode = SATA_OOB_MODE;
attached_sas_addr[0] = 0x50;
- attached_sas_addr[6] = shost->host_no;
- attached_sas_addr[7] = phy_no;
+ attached_sas_addr[HOST_NO_OFF] = shost->host_no;
+ attached_sas_addr[PHY_NO_OFF] = phy_no;
memcpy(sas_phy->attached_sas_addr,
attached_sas_addr,
SAS_ADDR_SIZE);
@@ -1594,7 +1643,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
(struct sas_identify_frame *)frame_rcvd;
dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < IDENTIFY_REG_READ; i++) {
u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
RX_IDAF_DWORD0 + (i * 4));
frame_rcvd[i] = __swab32(idaf);
@@ -1864,7 +1913,7 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
phy_no, reg_value);
- if (reg_value & BIT(4))
+ if (reg_value & BIT(LINK_RESET_TIMEOUT_OFF))
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
@@ -2581,7 +2630,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
struct pci_dev *pdev = hisi_hba->pci_dev;
int rc, i;
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX),
int_phy_up_down_bcast_v3_hw, 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -2589,7 +2638,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
}
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_CHL_INDEX),
int_chnl_int_v3_hw, 0,
DRV_NAME " channel", hisi_hba);
if (rc) {
@@ -2597,7 +2646,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return -ENOENT;
}
- rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_AXI_INDEX),
fatal_axi_int_v3_hw, 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -2610,7 +2659,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
+ BASE_VECTORS_V3_HW + i;
unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED :
IRQF_ONESHOT;
@@ -2668,14 +2718,14 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
struct pci_dev *pdev = hisi_hba->pci_dev;
int i;
- synchronize_irq(pci_irq_vector(pdev, 1));
- synchronize_irq(pci_irq_vector(pdev, 2));
- synchronize_irq(pci_irq_vector(pdev, 11));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_CHL_INDEX));
+ synchronize_irq(pci_irq_vector(pdev, IRQ_AXI_INDEX));
for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
for (i = 0; i < hisi_hba->cq_nvecs; i++)
- synchronize_irq(pci_irq_vector(pdev, i + 16));
+ synchronize_irq(pci_irq_vector(pdev, i + BASE_VECTORS_V3_HW));
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
@@ -2707,7 +2757,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_stop_phys(hisi_hba);
- mdelay(10);
+ mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE);
reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
AM_CTRL_GLOBAL);
@@ -2843,13 +2893,13 @@ static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
u32 intr_coal_ticks;
int ret;
- ret = kstrtou32(buf, 10, &intr_coal_ticks);
+ ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_ticks);
if (ret) {
dev_err(dev, "Input data of interrupt coalesce unmatch\n");
return -EINVAL;
}
- if (intr_coal_ticks >= BIT(24)) {
+ if (intr_coal_ticks >= BIT(TICKS_BIT_INDEX)) {
dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
return -EINVAL;
}
@@ -2882,13 +2932,13 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
u32 intr_coal_count;
int ret;
- ret = kstrtou32(buf, 10, &intr_coal_count);
+ ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_count);
if (ret) {
dev_err(dev, "Input data of interrupt coalesce unmatch\n");
return -EINVAL;
}
- if (intr_coal_count >= BIT(8)) {
+ if (intr_coal_count >= BIT(COUNT_BIT_INDEX)) {
dev_err(dev, "intr_coal_count must be less than 2^8!\n");
return -EINVAL;
}
@@ -3020,7 +3070,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
.lu = debugfs_port_reg_lu,
- .count = 0x100,
+ .count = PORT_REG_LENGTH,
.base_off = PORT_BASE,
};
@@ -3094,7 +3144,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
.lu = debugfs_global_reg_lu,
- .count = 0x800,
+ .count = GLOBAL_REG_LENGTH,
};
static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
@@ -3107,7 +3157,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
.lu = debugfs_axi_reg_lu,
- .count = 0x61,
+ .count = AXI_REG_LENGTH,
.base_off = AXI_MASTER_CFG_BASE,
};
@@ -3124,7 +3174,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
.lu = debugfs_ras_reg_lu,
- .count = 0x10,
+ .count = RAS_REG_LENGTH,
.base_off = RAS_BASE,
};
@@ -3133,7 +3183,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
scsi_block_requests(shost);
- wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+ wait_cmds_complete_timeout_v3_hw(hisi_hba, WAIT_RETRY, WAIT_TMROUT);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_sync_cqs(hisi_hba);
@@ -3174,7 +3224,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
return;
}
- memset(buf, 0, cache_dw_size * 4);
+ memset(buf, 0, cache_dw_size * BYTE_TO_DW);
buf[0] = val;
for (i = 1; i < cache_dw_size; i++)
@@ -3221,7 +3271,7 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
/* init OOB link rate as 1.5 Gbits */
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
- reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ reg_val |= (SAS_LINK_RATE_1_5_GBPS << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
/* enable PHY */
@@ -3230,6 +3280,9 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
#define SAS_PHY_BIST_CODE_INIT 0x1
#define SAS_PHY_BIST_CODE1_INIT 0X80
+#define SAS_PHY_BIST_INIT_DELAY 100
+#define SAS_PHY_BIST_LOOP_TEST_0 1
+#define SAS_PHY_BIST_LOOP_TEST_1 2
static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
u32 reg_val, mode_tmp;
@@ -3248,7 +3301,8 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
fix_code[FIXED_CODE_1]);
- mode_tmp = path_mode ? 2 : 1;
+ mode_tmp = path_mode ? SAS_PHY_BIST_LOOP_TEST_1 :
+ SAS_PHY_BIST_LOOP_TEST_0;
if (enable) {
/* some preparations before bist test */
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
@@ -3291,13 +3345,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1_INIT);
}
- mdelay(100);
+ mdelay(SAS_PHY_BIST_INIT_DELAY);
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
reg_val);
/* clear error bit */
- mdelay(100);
+ mdelay(SAS_PHY_BIST_INIT_DELAY);
hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
} else {
/* disable bist test and recover it */
@@ -3473,7 +3527,7 @@ static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba)
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
- offset = port->base_off + 4 * i;
+ offset = port->base_off + HISI_SAS_REG_MEM_SIZE * i;
*databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt,
offset);
}
@@ -3487,7 +3541,8 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < debugfs_global_reg.count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i);
}
static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3498,7 +3553,9 @@ static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < axi->count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i +
+ axi->base_off);
}
static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3509,7 +3566,9 @@ static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba)
int i;
for (i = 0; i < ras->count; i++, databuf++)
- *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off);
+ *databuf = hisi_sas_read32(hisi_hba,
+ HISI_SAS_REG_MEM_SIZE * i +
+ ras->base_off);
}
static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba)
@@ -3572,7 +3631,7 @@ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
int i;
for (i = 0; i < reg->count; i++) {
- int off = i * 4;
+ int off = i * HISI_SAS_REG_MEM_SIZE;
const char *name;
name = debugfs_to_reg_name_v3_hw(off, reg->base_off,
@@ -3650,9 +3709,9 @@ static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index,
/* completion header size not fixed per HW version */
seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 8; i++, ptr++) {
+ for (i = 1; i <= sz / BYTE_TO_DDW; i++, ptr++) {
seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
- if (!(i % 2))
+ if (!(i % TWO_PARA_PER_LINE))
seq_puts(s, "\n\t");
}
@@ -3666,9 +3725,9 @@ static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index,
/* completion header size not fixed per HW version */
seq_printf(s, "index %04d:\n\t", index);
- for (i = 1; i <= sz / 4; i++, ptr++) {
+ for (i = 1; i <= sz / BYTE_TO_DW; i++, ptr++) {
seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
- if (!(i % 4))
+ if (!(i % FOUR_PARA_PER_LINE))
seq_puts(s, "\n\t");
}
seq_puts(s, "\n");
@@ -3753,7 +3812,7 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
struct hisi_sas_iost_itct_cache *iost_cache =
debugfs_iost_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
int i, tab_idx;
__le64 *iost;
@@ -3801,7 +3860,7 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
struct hisi_sas_iost_itct_cache *itct_cache =
debugfs_itct_cache->cache;
- u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW;
int i, tab_idx;
__le64 *itct;
@@ -3830,12 +3889,12 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
u64 *debugfs_timestamp;
struct dentry *dump_dentry;
struct dentry *dentry;
- char name[256];
+ char name[NAME_BUF_SIZE];
int p;
int c;
int d;
- snprintf(name, 256, "%d", index);
+ snprintf(name, NAME_BUF_SIZE, "%d", index);
dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
@@ -3851,7 +3910,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
- snprintf(name, 256, "%d", p);
+ snprintf(name, NAME_BUF_SIZE, "%d", p);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_port_reg[index][p],
@@ -3861,7 +3920,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create CQ dir and files */
dentry = debugfs_create_dir("cq", dump_dentry);
for (c = 0; c < hisi_hba->queue_count; c++) {
- snprintf(name, 256, "%d", c);
+ snprintf(name, NAME_BUF_SIZE, "%d", c);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_cq[index][c],
@@ -3871,7 +3930,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
/* Create DQ dir and files */
dentry = debugfs_create_dir("dq", dump_dentry);
for (d = 0; d < hisi_hba->queue_count; d++) {
- snprintf(name, 256, "%d", d);
+ snprintf(name, NAME_BUF_SIZE, "%d", d);
debugfs_create_file(name, 0400, dentry,
&hisi_hba->debugfs_dq[index][d],
@@ -3908,9 +3967,9 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
size_t count, loff_t *ppos)
{
struct hisi_hba *hisi_hba = file->f_inode->i_private;
- char buf[8];
+ char buf[DUMP_BUF_SIZE];
- if (count > 8)
+ if (count > DUMP_BUF_SIZE)
return -EFAULT;
if (copy_from_user(buf, user_buf, count))
@@ -3974,7 +4033,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -3991,7 +4050,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp,
for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) {
if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name,
- pkbuf, 16)) {
+ pkbuf, BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_linkrate =
debugfs_loop_linkrate_v3_hw[i].value;
found = true;
@@ -4049,7 +4108,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4066,7 +4125,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp,
for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) {
if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name,
- pkbuf, 16)) {
+ pkbuf, BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_code_mode =
debugfs_loop_code_mode_v3_hw[i].value;
found = true;
@@ -4181,7 +4240,7 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
{
struct seq_file *m = filp->private_data;
struct hisi_hba *hisi_hba = m->private;
- char kbuf[16] = {}, *pkbuf;
+ char kbuf[BIST_BUF_SIZE] = {}, *pkbuf;
bool found = false;
int i;
@@ -4197,7 +4256,8 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp,
pkbuf = strstrip(kbuf);
for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) {
- if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) {
+ if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf,
+ BIST_BUF_SIZE)) {
hisi_hba->debugfs_bist_mode =
debugfs_loop_modes_v3_hw[i].value;
found = true;
@@ -4476,8 +4536,9 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
debugfs_read_fifo_data_v3_hw(phy);
- debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
- (__le32 *)phy->fifo.rd_data);
+ debugfs_show_row_32_v3_hw(s, 0,
+ HISI_SAS_FIFO_DATA_DW_SIZE * HISI_SAS_REG_MEM_SIZE,
+ phy->fifo.rd_data);
return 0;
}
@@ -4609,14 +4670,14 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
struct hisi_sas_debugfs_regs *regs =
&hisi_hba->debugfs_regs[dump_index][r];
- sz = debugfs_reg_array_v3_hw[r]->count * 4;
+ sz = debugfs_reg_array_v3_hw[r]->count * HISI_SAS_REG_MEM_SIZE;
regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
if (!regs->data)
goto fail;
regs->hisi_hba = hisi_hba;
}
- sz = debugfs_port_reg.count * 4;
+ sz = debugfs_port_reg.count * HISI_SAS_REG_MEM_SIZE;
for (p = 0; p < hisi_hba->n_phy; p++) {
struct hisi_sas_debugfs_port *port =
&hisi_hba->debugfs_port_reg[dump_index][p];
@@ -4726,11 +4787,11 @@ static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct dentry *dir = debugfs_create_dir("phy_down_cnt",
hisi_hba->debugfs_dir);
- char name[16];
+ char name[NAME_BUF_SIZE];
int phy_no;
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
- snprintf(name, 16, "%d", phy_no);
+ snprintf(name, NAME_BUF_SIZE, "%d", phy_no);
debugfs_create_file(name, 0600, dir,
&hisi_hba->phy[phy_no],
&debugfs_phy_down_cnt_v3_hw_fops);
@@ -4898,8 +4959,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->transportt = hisi_sas_stt;
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
- shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_channel = 0;
+ shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
if (hisi_hba->iopoll_q_cnt)
@@ -4981,12 +5042,13 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
{
int i;
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba);
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba);
- devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), hisi_hba);
+ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), hisi_hba);
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- int nr = hisi_sas_intr_conv ? 16 : 16 + i;
+ int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW :
+ BASE_VECTORS_V3_HW + i;
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 4198830bf10b..3a057a0f0d80 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -4677,21 +4677,25 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
}
for (i = 0; i < mrioc->num_queues; i++) {
- mrioc->op_reply_qinfo[i].qid = 0;
- mrioc->op_reply_qinfo[i].ci = 0;
- mrioc->op_reply_qinfo[i].num_replies = 0;
- mrioc->op_reply_qinfo[i].ephase = 0;
- atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
- atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
- mpi3mr_memset_op_reply_q_buffers(mrioc, i);
-
- mrioc->req_qinfo[i].ci = 0;
- mrioc->req_qinfo[i].pi = 0;
- mrioc->req_qinfo[i].num_requests = 0;
- mrioc->req_qinfo[i].qid = 0;
- mrioc->req_qinfo[i].reply_qid = 0;
- spin_lock_init(&mrioc->req_qinfo[i].q_lock);
- mpi3mr_memset_op_req_q_buffers(mrioc, i);
+ if (mrioc->op_reply_qinfo) {
+ mrioc->op_reply_qinfo[i].qid = 0;
+ mrioc->op_reply_qinfo[i].ci = 0;
+ mrioc->op_reply_qinfo[i].num_replies = 0;
+ mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
+ mpi3mr_memset_op_reply_q_buffers(mrioc, i);
+ }
+
+ if (mrioc->req_qinfo) {
+ mrioc->req_qinfo[i].ci = 0;
+ mrioc->req_qinfo[i].pi = 0;
+ mrioc->req_qinfo[i].num_requests = 0;
+ mrioc->req_qinfo[i].qid = 0;
+ mrioc->req_qinfo[i].reply_qid = 0;
+ spin_lock_init(&mrioc->req_qinfo[i].q_lock);
+ mpi3mr_memset_op_req_q_buffers(mrioc, i);
+ }
}
atomic_set(&mrioc->pend_large_data_sz, 0);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 372807485517..f57260e28c7a 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -353,12 +353,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
- if (scsi_realloc_sdev_budget_map(sdev, depth)) {
- kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
- put_device(&starget->dev);
- kfree(sdev);
- goto out;
- }
+ if (scsi_realloc_sdev_budget_map(sdev, depth))
+ goto out_device_destroy;
scsi_change_queue_depth(sdev, depth);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 2c61624cb4b0..50e744e89129 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -529,9 +529,8 @@ struct efd {
};
static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
- void *data)
+ struct efd *efd)
{
- struct efd *efd = data;
int i;
struct ses_component *scomp;
@@ -684,7 +683,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
if (efd.addr) {
efd.dev = &sdev->sdev_gendev;
- enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+ ses_enclosure_find_by_addr(edev, &efd);
}
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 9dcad02ce489..106bccaac427 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1861,8 +1861,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
cmd_request->payload_sz = payload_sz;
/* Invokes the vsc to start an IO */
- ret = storvsc_do_io(dev, cmd_request, get_cpu());
- put_cpu();
+ migrate_disable();
+ ret = storvsc_do_io(dev, cmd_request, smp_processor_id());
+ migrate_enable();
if (ret)
scsi_dma_unmap(scmnd);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 4dc8aba33d9b..0791b4191338 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1827,6 +1827,8 @@ EXPORT_SYMBOL(qman_create_fq);
void qman_destroy_fq(struct qman_fq *fq)
{
+ int leaked;
+
/*
* We don't need to lock the FQ as it is a pre-condition that the FQ be
* quiesced. Instead, run some checks.
@@ -1834,11 +1836,29 @@ void qman_destroy_fq(struct qman_fq *fq)
switch (fq->state) {
case qman_fq_state_parked:
case qman_fq_state_oos:
- if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
- qman_release_fqid(fq->fqid);
+ /*
+ * There's a race condition here on releasing the fqid,
+ * setting the fq_table to NULL, and freeing the fqid.
+ * To prevent it, this order should be respected:
+ */
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) {
+ leaked = qman_shutdown_fq(fq->fqid);
+ if (leaked)
+ pr_debug("FQID %d leaked\n", fq->fqid);
+ }
DPAA_ASSERT(fq_table[fq->idx]);
fq_table[fq->idx] = NULL;
+
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID) && !leaked) {
+ /*
+ * fq_table[fq->idx] should be set to null before
+			 * freeing fq->fqid otherwise it could be allocated by
+ * qman_alloc_fqid() while still being !NULL
+ */
+ smp_wmb();
+ gen_pool_free(qm_fqalloc, fq->fqid | DPAA_GENALLOC_OFF, 1);
+ }
return;
default:
break;
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 36c0ccc06151..cc7032a0ad8c 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -1777,8 +1777,8 @@ static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
return -EINVAL;
qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
qmc->dpram = devm_ioremap_resource(qmc->dev, res);
- if (IS_ERR(qmc->scc_pram))
- return PTR_ERR(qmc->scc_pram);
+ if (IS_ERR(qmc->dpram))
+ return PTR_ERR(qmc->dpram);
return 0;
}
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
index 30bc45d17d34..81636cfecd37 100644
--- a/drivers/soc/microchip/mpfs-sys-controller.c
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -142,8 +142,10 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
sys_controller->flash = of_get_mtd_device_by_node(np);
of_node_put(np);
- if (IS_ERR(sys_controller->flash))
- return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n");
+ if (IS_ERR(sys_controller->flash)) {
+ ret = dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n");
+ goto out_free;
+ }
no_flash:
sys_controller->client.dev = dev;
@@ -155,8 +157,7 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
if (IS_ERR(sys_controller->chan)) {
ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan),
"Failed to get mbox channel\n");
- kfree(sys_controller);
- return ret;
+ goto out_free;
}
init_completion(&sys_controller->c);
@@ -174,6 +175,10 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Registered MPFS system controller\n");
return 0;
+
+out_free:
+ kfree(sys_controller);
+ return ret;
}
static void mpfs_sys_controller_remove(struct platform_device *pdev)
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index dddfe349b3da..6fd02220abf1 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -217,6 +217,7 @@ static int __init rockchip_grf_init(void)
grf = syscon_node_to_regmap(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
+ of_node_put(np);
return PTR_ERR(grf);
}
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index e1d64a9a3446..288a0467c937 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -105,6 +105,8 @@ struct cqspi_st {
bool is_jh7110; /* Flag for StarFive JH7110 SoC */
bool disable_stig_mode;
+ refcount_t refcount;
+ refcount_t inflight_ops;
const struct cqspi_driver_platdata *ddata;
};
@@ -731,6 +733,9 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
u8 *rxbuf_end = rxbuf + n_rx;
int ret = 0;
+ if (!refcount_read(&cqspi->refcount))
+ return -ENODEV;
+
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
@@ -1058,6 +1063,9 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
unsigned int write_bytes;
int ret;
+ if (!refcount_read(&cqspi->refcount))
+ return -ENODEV;
+
writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
@@ -1450,12 +1458,26 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &cqspi->pdev->dev;
+ if (refcount_read(&cqspi->inflight_ops) == 0)
+ return -ENODEV;
+
ret = pm_runtime_resume_and_get(dev);
if (ret) {
dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
return ret;
}
+ if (!refcount_read(&cqspi->refcount))
+ return -EBUSY;
+
+ refcount_inc(&cqspi->inflight_ops);
+
+ if (!refcount_read(&cqspi->refcount)) {
+ if (refcount_read(&cqspi->inflight_ops))
+ refcount_dec(&cqspi->inflight_ops);
+ return -EBUSY;
+ }
+
ret = cqspi_mem_process(mem, op);
pm_runtime_mark_last_busy(dev);
@@ -1464,6 +1486,9 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (ret)
dev_err(&mem->spi->dev, "operation failed with %d\n", ret);
+ if (refcount_read(&cqspi->inflight_ops) > 1)
+ refcount_dec(&cqspi->inflight_ops);
+
return ret;
}
@@ -1916,6 +1941,9 @@ static int cqspi_probe(struct platform_device *pdev)
}
}
+ refcount_set(&cqspi->refcount, 1);
+ refcount_set(&cqspi->inflight_ops, 1);
+
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
if (ret) {
@@ -1978,6 +2006,11 @@ static void cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ refcount_set(&cqspi->refcount, 0);
+
+ if (!refcount_dec_and_test(&cqspi->inflight_ops))
+ cqspi_wait_idle(cqspi);
+
spi_unregister_controller(cqspi->host);
cqspi_controller_enable(cqspi, 0);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 63b25d929a8b..7cf37e28e485 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2940,6 +2940,8 @@ static void spi_controller_release(struct device *dev)
struct spi_controller *ctlr;
ctlr = container_of(dev, struct spi_controller, dev);
+
+ free_percpu(ctlr->pcpu_statistics);
kfree(ctlr);
}
@@ -3080,6 +3082,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
if (!ctlr)
return NULL;
+ ctlr->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
+ if (!ctlr->pcpu_statistics) {
+ kfree(ctlr);
+ return NULL;
+ }
+
device_initialize(&ctlr->dev);
INIT_LIST_HEAD(&ctlr->queue);
spin_lock_init(&ctlr->queue_lock);
@@ -3367,17 +3375,8 @@ int spi_register_controller(struct spi_controller *ctlr)
dev_info(dev, "controller is unqueued, this is deprecated\n");
} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
status = spi_controller_initialize_queue(ctlr);
- if (status) {
- device_del(&ctlr->dev);
- goto free_bus_id;
- }
- }
- /* Add statistics */
- ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
- if (!ctlr->pcpu_statistics) {
- dev_err(dev, "Error allocating per-cpu statistics\n");
- status = -ENOMEM;
- goto destroy_queue;
+ if (status)
+ goto del_ctrl;
}
mutex_lock(&board_lock);
@@ -3391,8 +3390,8 @@ int spi_register_controller(struct spi_controller *ctlr)
acpi_register_spi_devices(ctlr);
return status;
-destroy_queue:
- spi_destroy_queue(ctlr);
+del_ctrl:
+ device_del(&ctlr->dev);
free_bus_id:
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, ctlr->bus_num);
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 5abe2fddc3d7..83b770571bee 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -187,20 +187,25 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
cnt = 0;
- while (cnt < in_len) {
+ while (cnt + 2 <= in_len) {
+ u8 ie_len = in_ie[cnt + 1];
+
+ if (cnt + 2 + ie_len > in_len)
+ break;
+
if (eid == in_ie[cnt]
- && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ && (!oui || (ie_len >= oui_len && !memcmp(&in_ie[cnt + 2], oui, oui_len)))) {
target_ie = &in_ie[cnt];
if (ie)
- memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+ memcpy(ie, &in_ie[cnt], ie_len + 2);
if (ielen)
- *ielen = in_ie[cnt+1]+2;
+ *ielen = ie_len + 2;
break;
}
- cnt += in_ie[cnt+1]+2; /* goto next */
+ cnt += ie_len + 2; /* goto next */
}
return target_ie;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 1f5a23f13dde..3563017acc31 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1929,7 +1929,10 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
while (i < in_len) {
ielength = initial_out_len;
- if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) { /* WMM element ID and OUI */
+ if (i + 5 < in_len &&
+ in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 &&
+ in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 &&
+ in_ie[i + 5] == 0x02) {
for (j = i; j < i + 9; j++) {
out_ie[ielength] = in_ie[j];
ielength++;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index bdd26c9f34bd..3b6452e759d5 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -162,7 +162,22 @@ void serial8250_tx_dma_flush(struct uart_8250_port *p)
*/
dma->tx_size = 0;
+ /*
+ * We can't use `dmaengine_terminate_sync` because `uart_flush_buffer` is
+ * holding the uart port spinlock.
+ */
dmaengine_terminate_async(dma->txchan);
+
+ /*
+ * The callback might or might not run. If it doesn't run, we need to ensure
+ * that `tx_running` is cleared so that we can schedule new transactions.
+ * If it does run, then the zombie callback will clear `tx_running` again
+ * and perform a no-op since `tx_size` was cleared above.
+ *
+ * In either case, we ASSUME the DMA transaction will terminate before we
+ * issue a new `serial8250_tx_dma`.
+ */
+ dma->tx_running = 0;
}
int serial8250_rx_dma(struct uart_8250_port *p)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index ab6adfe24b5d..b34cacd9dd76 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -137,6 +137,8 @@ struct serial_private {
};
#define PCI_DEVICE_ID_HPE_PCI_SERIAL 0x37e
+#define PCIE_VENDOR_ID_ASIX 0x125B
+#define PCIE_DEVICE_ID_AX99100 0x9100
static const struct pci_device_id pci_use_msi[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
@@ -149,6 +151,8 @@ static const struct pci_device_id pci_use_msi[] = {
0xA000, 0x1000) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
PCI_ANY_ID, PCI_ANY_ID) },
+ { PCI_DEVICE_SUB(PCIE_VENDOR_ID_ASIX, PCIE_DEVICE_ID_AX99100,
+ 0xA000, 0x1000) },
{ }
};
@@ -912,6 +916,7 @@ static int pci_netmos_init(struct pci_dev *dev)
case PCI_DEVICE_ID_NETMOS_9912:
case PCI_DEVICE_ID_NETMOS_9922:
case PCI_DEVICE_ID_NETMOS_9900:
+ case PCIE_DEVICE_ID_AX99100:
num_serial = pci_netmos_9900_numports(dev);
break;
@@ -2507,6 +2512,14 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.init = pci_netmos_init,
.setup = pci_netmos_9900_setup,
},
+ {
+ .vendor = PCIE_VENDOR_ID_ASIX,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_netmos_init,
+ .setup = pci_netmos_9900_setup,
+ },
/*
* EndRun Technologies
*/
@@ -6028,6 +6041,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
0xA000, 0x3002,
0, 0, pbn_NETMOS9900_2s_115200 },
+ { PCIE_VENDOR_ID_ASIX, PCIE_DEVICE_ID_AX99100,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/*
* Best Connectivity and Rosewill PCI Multi I/O cards
*/
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 03aca7eaca16..b4c8388ea6fc 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2509,6 +2509,12 @@ void serial8250_do_shutdown(struct uart_port *port)
* the IRQ chain.
*/
serial_port_in(port, UART_RX);
+ /*
+ * LCR writes on DW UART can trigger late (unmaskable) IRQs.
+ * Handle them before releasing the handler.
+ */
+ synchronize_irq(port->irq);
+
serial8250_rpm_put(up);
up->ops->release_irq(up);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 71890f3244a0..94c3f5e10554 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -878,6 +878,7 @@ static int ulite_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ad5866149e24..22d3cb0ddbca 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -483,8 +483,8 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
if (hba->mcq_enabled) {
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
-
- hwq_id = hwq->id;
+ if (hwq)
+ hwq_id = hwq->id;
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
@@ -6974,7 +6974,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
if (ret)
- outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+ outstanding_cqs = (1ULL << hba->nr_hw_queues) - 1;
/* Exclude the poll queues */
nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
@@ -9813,6 +9813,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
flush_work(&hba->eeh_work);
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
if (ret)
@@ -9867,7 +9868,6 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_link_active;
- cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
goto out;
set_link_active:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 73f9476774ae..35a8f56b920b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1379,6 +1379,8 @@ static int acm_probe(struct usb_interface *intf,
acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities;
if (quirks & NO_CAP_LINE)
acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
+ if (quirks & MISSING_CAP_BRK)
+ acm->ctrl_caps |= USB_CDC_CAP_BRK;
acm->ctrlsize = ctrlsize;
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
@@ -2002,6 +2004,9 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ /* CH343 supports CAP_BRK, but doesn't advertise it */
+ { USB_DEVICE(0x1a86, 0x55d3), .driver_info = MISSING_CAP_BRK, },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 759ac15631d3..76f73853a60b 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -113,3 +113,4 @@ struct acm {
#define CLEAR_HALT_CONDITIONS BIT(5)
#define SEND_ZERO_PACKET BIT(6)
#define DISABLE_ECHO BIT(7)
+#define MISSING_CAP_BRK BIT(8)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ecd6d1f39e49..92567324c5da 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -225,7 +225,8 @@ static void wdm_in_callback(struct urb *urb)
/* we may already be in overflow */
if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
memmove(desc->ubuf + desc->length, desc->inbuf, length);
- desc->length += length;
+ smp_wmb(); /* against wdm_read() */
+ WRITE_ONCE(desc->length, desc->length + length);
}
}
skip_error:
@@ -533,6 +534,7 @@ static ssize_t wdm_read
return -ERESTARTSYS;
cntr = READ_ONCE(desc->length);
+ smp_rmb(); /* against wdm_in_callback() */
if (cntr == 0) {
desc->read = 0;
retry:
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 75de29725a45..8179ea0914cf 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -727,7 +727,7 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
buffer[1] = data->bTag;
buffer[2] = ~data->bTag;
- retval = usb_bulk_msg(data->usb_dev,
+ retval = usb_bulk_msg_killable(data->usb_dev,
usb_sndbulkpipe(data->usb_dev,
data->bulk_out),
buffer, USBTMC_HEADER_SIZE,
@@ -1347,7 +1347,7 @@ static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
buffer[11] = 0; /* Reserved */
/* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
+ retval = usb_bulk_msg_killable(data->usb_dev,
usb_sndbulkpipe(data->usb_dev,
data->bulk_out),
buffer, USBTMC_HEADER_SIZE,
@@ -1419,7 +1419,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
actual = 0;
/* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
+ retval = usb_bulk_msg_killable(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev,
data->bulk_in),
buffer, bufsize, &actual,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 6138468c67c4..43522f1d6b2b 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -42,16 +42,19 @@ static void usb_api_blocking_completion(struct urb *urb)
/*
- * Starts urb and waits for completion or timeout. Note that this call
- * is NOT interruptible. Many device driver i/o requests should be
- * interruptible and therefore these drivers should implement their
- * own interruptible routines.
+ * Starts urb and waits for completion or timeout.
+ * Whether or not the wait is killable depends on the flag passed in.
+ * For example, compare usb_bulk_msg() and usb_bulk_msg_killable().
+ *
+ * For non-killable waits, we enforce a maximum limit on the timeout value.
*/
-static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
+static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length,
+ bool killable)
{
struct api_context ctx;
unsigned long expire;
int retval;
+ long rc;
init_completion(&ctx.done);
urb->context = &ctx;
@@ -60,13 +63,24 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
if (unlikely(retval))
goto out;
- expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_timeout(&ctx.done, expire)) {
+ if (!killable && (timeout <= 0 || timeout > USB_MAX_SYNCHRONOUS_TIMEOUT))
+ timeout = USB_MAX_SYNCHRONOUS_TIMEOUT;
+ expire = (timeout > 0) ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
+ if (killable)
+ rc = wait_for_completion_killable_timeout(&ctx.done, expire);
+ else
+ rc = wait_for_completion_timeout(&ctx.done, expire);
+ if (rc <= 0) {
usb_kill_urb(urb);
- retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status);
+ if (ctx.status != -ENOENT)
+ retval = ctx.status;
+ else if (rc == 0)
+ retval = -ETIMEDOUT;
+ else
+ retval = rc;
dev_dbg(&urb->dev->dev,
- "%s timed out on ep%d%s len=%u/%u\n",
+ "%s timed out or killed on ep%d%s len=%u/%u\n",
current->comm,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
@@ -100,7 +114,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data,
len, usb_api_blocking_completion, NULL);
- retv = usb_start_wait_urb(urb, timeout, &length);
+ retv = usb_start_wait_urb(urb, timeout, &length, false);
if (retv < 0)
return retv;
else
@@ -117,8 +131,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
* @index: USB message index value
* @data: pointer to the data to send
* @size: length in bytes of the data to send
- * @timeout: time in msecs to wait for the message to complete before timing
- * out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
*
* Context: task context, might sleep.
*
@@ -173,8 +186,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
* @index: USB message index value
* @driver_data: pointer to the data to send
* @size: length in bytes of the data to send
- * @timeout: time in msecs to wait for the message to complete before timing
- * out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
* @memflags: the flags for memory allocation for buffers
*
* Context: !in_interrupt ()
@@ -232,8 +244,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg_send);
* @index: USB message index value
* @driver_data: pointer to the data to be filled in by the message
* @size: length in bytes of the data to be received
- * @timeout: time in msecs to wait for the message to complete before timing
- * out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
* @memflags: the flags for memory allocation for buffers
*
* Context: !in_interrupt ()
@@ -304,8 +315,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg_recv);
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
- * @timeout: time in msecs to wait for the message to complete before
- * timing out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
*
* Context: task context, might sleep.
*
@@ -337,8 +347,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
- * @timeout: time in msecs to wait for the message to complete before
- * timing out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
*
* Context: task context, might sleep.
*
@@ -385,10 +394,59 @@ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL);
- return usb_start_wait_urb(urb, timeout, actual_length);
+ return usb_start_wait_urb(urb, timeout, actual_length, false);
}
EXPORT_SYMBOL_GPL(usb_bulk_msg);
+/**
+ * usb_bulk_msg_killable - Builds a bulk urb, sends it off and waits for completion in a killable state
+ * @usb_dev: pointer to the usb device to send the message to
+ * @pipe: endpoint "pipe" to send the message to
+ * @data: pointer to the data to send
+ * @len: length in bytes of the data to send
+ * @actual_length: pointer to a location to put the actual length transferred
+ * in bytes
+ * @timeout: time in msecs to wait for the message to complete before
+ * timing out (if <= 0, the wait is as long as possible)
+ *
+ * Context: task context, might sleep.
+ *
+ * This function is just like usb_bulk_msg(), except that it waits in a
+ * killable state and there is no limit on the timeout length.
+ *
+ * Return:
+ * If successful, 0. Otherwise a negative error number. The number of actual
+ * bytes transferred will be stored in the @actual_length parameter.
+ *
+ */
+int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout)
+{
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+
+ ep = usb_pipe_endpoint(usb_dev, pipe);
+ if (!ep || len < 0)
+ return -EINVAL;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT) {
+ pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+ usb_fill_int_urb(urb, usb_dev, pipe, data, len,
+ usb_api_blocking_completion, NULL,
+ ep->desc.bInterval);
+ } else
+ usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
+ usb_api_blocking_completion, NULL);
+
+ return usb_start_wait_urb(urb, timeout, actual_length, true);
+}
+EXPORT_SYMBOL_GPL(usb_bulk_msg_killable);
+
/*-------------------------------------------------------------------*/
static void sg_clean(struct usb_sg_request *io)
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index faa20054ad5a..4bba1c275740 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -200,16 +200,10 @@ int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
list_for_each_entry(roothub_entry, head, list) {
err = phy_set_mode(roothub_entry->phy, mode);
if (err)
- goto err_out;
+ return err;
}
return 0;
-
-err_out:
- list_for_each_entry_continue_reverse(roothub_entry, head, list)
- phy_power_off(roothub_entry->phy);
-
- return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 323a949bbb05..c12942a533ce 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -208,6 +208,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* HP v222w 16GB Mini USB Drive */
{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Huawei 4G LTE module ME906S */
+ { USB_DEVICE(0x03f0, 0xa31d), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -377,6 +381,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* SanDisk Extreme 55AE */
{ USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM },
+ /* Avermedia Live Gamer Ultra 2.1 (GC553G2) - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x07ca, 0x2553), .driver_info = USB_QUIRK_NO_BOS },
+
/* Realforce 87U Keyboard */
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
@@ -434,6 +441,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* ASUS TUF 4K PRO - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x0b05, 0x1ab9), .driver_info = USB_QUIRK_NO_BOS },
+
/* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/
{ USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -562,6 +572,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+ /* UGREEN 35871 - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x2b89, 0x5871), .driver_info = USB_QUIRK_NO_BOS },
+
/* APTIV AUTOMOTIVE HUB */
{ USB_DEVICE(0x2c48, 0x0132), .driver_info =
USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
@@ -572,6 +585,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
{ USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+ /* ezcap401 - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x32ed, 0x0401), .driver_info = USB_QUIRK_NO_BOS },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 8f5faf632a8b..6110bd96a60e 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -56,6 +56,7 @@
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
+#define PCI_DEVICE_ID_INTEL_NVLH 0xd37f
#define PCI_DEVICE_ID_INTEL_PTLH 0xe332
#define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e
#define PCI_DEVICE_ID_INTEL_PTLU 0xe432
@@ -448,6 +449,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, CNPH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, CNPV, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, RPL, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, NVLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 08e0d1c511e8..74cb7e57a197 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -180,6 +180,7 @@
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
+#include <linux/overflow.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
@@ -1853,8 +1854,15 @@ static int check_command_size_in_blocks(struct fsg_common *common,
int cmnd_size, enum data_direction data_dir,
unsigned int mask, int needs_medium, const char *name)
{
- if (common->curlun)
- common->data_size_from_cmnd <<= common->curlun->blkbits;
+ if (common->curlun) {
+ if (check_shl_overflow(common->data_size_from_cmnd,
+ common->curlun->blkbits,
+ &common->data_size_from_cmnd)) {
+ common->phase_error = 1;
+ return -EINVAL;
+ }
+ }
+
return check_command(common, cmnd_size, data_dir,
mask, needs_medium, name);
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 28e57d93973a..b6d22cf43114 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1438,6 +1438,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
struct f_ncm_opts *ncm_opts;
struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
+ struct net_device *net __free(detach_gadget) = NULL;
struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_ecm(cdev->gadget))
@@ -1451,18 +1452,19 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
return -ENOMEM;
}
- mutex_lock(&ncm_opts->lock);
- gether_set_gadget(ncm_opts->net, cdev->gadget);
- if (!ncm_opts->bound) {
- ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
- status = gether_register_netdev(ncm_opts->net);
- }
- mutex_unlock(&ncm_opts->lock);
-
- if (status)
- return status;
-
- ncm_opts->bound = true;
+ scoped_guard(mutex, &ncm_opts->lock)
+ if (ncm_opts->bind_count == 0) {
+ if (!device_is_registered(&ncm_opts->net->dev)) {
+ ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
+ gether_set_gadget(ncm_opts->net, cdev->gadget);
+ status = gether_register_netdev(ncm_opts->net);
+ } else
+ status = gether_attach_gadget(ncm_opts->net, cdev->gadget);
+
+ if (status)
+ return status;
+ net = ncm_opts->net;
+ }
ncm_string_defs[1].s = ncm->ethaddr;
@@ -1564,6 +1566,9 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
}
ncm->notify_req = no_free_ptr(request);
+ ncm_opts->bind_count++;
+ retain_and_null_ptr(net);
+
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
ncm->notify->name);
@@ -1655,7 +1660,7 @@ static void ncm_free_inst(struct usb_function_instance *f)
struct f_ncm_opts *opts;
opts = container_of(f, struct f_ncm_opts, func_inst);
- if (opts->bound)
+ if (device_is_registered(&opts->net->dev))
gether_cleanup(netdev_priv(opts->net));
else
free_netdev(opts->net);
@@ -1718,9 +1723,12 @@ static void ncm_free(struct usb_function *f)
static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_ncm *ncm = func_to_ncm(f);
+ struct f_ncm_opts *ncm_opts;
DBG(c->cdev, "ncm unbind\n");
+ ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+
hrtimer_cancel(&ncm->task_timer);
kfree(f->os_desc_table);
@@ -1736,6 +1744,10 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+ ncm_opts->bind_count--;
+ if (ncm_opts->bind_count == 0)
+ gether_detach_gadget(ncm_opts->net);
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 6ad205046032..cca19b465e88 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1032,6 +1032,13 @@ static void usbg_cmd_work(struct work_struct *work)
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
+
+ dev_err(&gadget->dev, "Missing nexus, ignoring command\n");
+ return;
+ }
+
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0) {
__target_init_cmd(se_cmd,
@@ -1160,6 +1167,13 @@ static void bot_cmd_work(struct work_struct *work)
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
+
+ dev_err(&gadget->dev, "Missing nexus, ignoring command\n");
+ return;
+ }
+
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0) {
__target_init_cmd(se_cmd,
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index f58590bf5e02..dabaa6669251 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -896,6 +896,28 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
+int gether_attach_gadget(struct net_device *net, struct usb_gadget *g)
+{
+ int ret;
+
+ ret = device_move(&net->dev, &g->dev, DPM_ORDER_DEV_AFTER_PARENT);
+ if (ret)
+ return ret;
+
+ gether_set_gadget(net, g);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gether_attach_gadget);
+
+void gether_detach_gadget(struct net_device *net)
+{
+ struct eth_dev *dev = netdev_priv(net);
+
+ device_move(&net->dev, NULL, DPM_ORDER_NONE);
+ dev->gadget = NULL;
+}
+EXPORT_SYMBOL_GPL(gether_detach_gadget);
+
int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
struct eth_dev *dev;
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 34be220cef77..c85a1cf3c115 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -150,6 +150,32 @@ static inline struct net_device *gether_setup_default(void)
*/
void gether_set_gadget(struct net_device *net, struct usb_gadget *g);
+/**
+ * gether_attach_gadget - Reparent net_device to the gadget device.
+ * @net: The network device to reparent.
+ * @g: The target USB gadget device to parent to.
+ *
+ * This function moves the network device to be a child of the USB gadget
+ * device in the device hierarchy. This is typically done when the function
+ * is bound to a configuration.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int gether_attach_gadget(struct net_device *net, struct usb_gadget *g);
+
+/**
+ * gether_detach_gadget - Detach net_device from its gadget parent.
+ * @net: The network device to detach.
+ *
+ * This function moves the network device to be a child of the virtual
+ * devices parent, effectively detaching it from the USB gadget device
+ * hierarchy. This is typically done when the function is unbound
+ * from a configuration but the instance is not yet freed.
+ */
+void gether_detach_gadget(struct net_device *net);
+
+DEFINE_FREE(detach_gadget, struct net_device *, if (_T) gether_detach_gadget(_T))
+
/**
* gether_set_dev_addr - initialize an ethernet-over-usb link with eth address
* @net: device representing this link
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 49ec095cdb4b..b1f3db8b68c1 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -18,7 +18,7 @@
struct f_ncm_opts {
struct usb_function_instance func_inst;
struct net_device *net;
- bool bound;
+ int bind_count;
struct config_group *ncm_interf_group;
struct usb_os_desc ncm_os_desc;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4fdedbcf429c..9d506409fdf6 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3200,6 +3200,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
+ xhci_halt(xhci);
goto out;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 4c6c4aa7d371..45e786852920 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3939,7 +3939,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
spin_unlock_irqrestore(&xhci->lock, flags);
- kfree(command);
+ xhci_free_command(xhci, command);
return -ENODEV;
}
@@ -3947,7 +3947,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- kfree(command);
+ xhci_free_command(xhci, command);
return ret;
}
xhci_ring_cmd_db(xhci);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 7b7e1554ea20..10d72562e4d2 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -707,7 +707,7 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
if (signal_pending (current))
{
mutex_unlock(&mdc800->io_lock);
- return -EINTR;
+ return len == left ? -EINTR : len-left;
}
sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left;
@@ -730,9 +730,11 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
mutex_unlock(&mdc800->io_lock);
return len-left;
}
- wait_event_timeout(mdc800->download_wait,
+ retval = wait_event_timeout(mdc800->download_wait,
mdc800->downloaded,
msecs_to_jiffies(TO_DOWNLOAD_GET_READY));
+ if (!retval)
+ usb_kill_urb(mdc800->download_urb);
mdc800->downloaded = 0;
if (mdc800->download_urb->status != 0)
{
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index b26c1d382d59..3138f5dca6da 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -736,7 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
ret = get_1284_register(pp, 0, ®, GFP_KERNEL);
dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
if (ret < 0)
- return ret;
+ goto probe_abort;
ret = usb_find_last_int_in_endpoint(interface, &epd);
if (!ret) {
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 70dff0db5354..6d03e689850a 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -272,6 +272,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
dev, 1);
dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ dev->bbu = -1;
if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
retval = -EIO;
dev_err(&interface->dev, "Could not submitting URB\n");
@@ -280,7 +281,6 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
- dev->bbu = -1;
/* we can register the device now, as it is ready */
retval = usb_register_dev(interface, &yurex_class);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 7f24fe82292c..ef9c3bd950ca 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -811,6 +811,15 @@ static void usbhs_remove(struct platform_device *pdev)
usbhs_platform_call(priv, hardware_exit, pdev);
reset_control_assert(priv->rsts);
+
+ /*
+ * Explicitly free the IRQ to ensure the interrupt handler is
+ * disabled and synchronized before freeing resources.
+ * devm_free_irq() calls free_irq() which waits for any running
+ * ISR to complete, preventing UAF.
+ */
+ devm_free_irq(&pdev->dev, priv->irq, priv);
+
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 30482d4cf826..b7cdc62d420f 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -139,9 +139,14 @@ static void *usb_role_switch_match(const struct fwnode_handle *fwnode, const cha
static struct usb_role_switch *
usb_role_switch_is_parent(struct fwnode_handle *fwnode)
{
- struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+ struct fwnode_handle *parent;
struct device *dev;
+ if (!fwnode_device_is_compatible(fwnode, "usb-b-connector"))
+ return NULL;
+
+ parent = fwnode_get_parent(fwnode);
+
if (!fwnode_property_present(parent, "usb-role-switch")) {
fwnode_handle_put(parent);
return NULL;
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 530b77fc2f78..9262a2ac97f5 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -70,7 +70,6 @@ MODULE_DEVICE_TABLE(usb, combined_id_table);
#define F81232_REGISTER_REQUEST 0xa0
#define F81232_GET_REGISTER 0xc0
#define F81232_SET_REGISTER 0x40
-#define F81534A_ACCESS_REG_RETRY 2
#define SERIAL_BASE_ADDRESS 0x0120
#define RECEIVE_BUFFER_REGISTER (0x00 + SERIAL_BASE_ADDRESS)
@@ -824,36 +823,31 @@ static void f81232_lsr_worker(struct work_struct *work)
static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
u16 size, void *val)
{
- struct usb_device *dev = interface_to_usbdev(intf);
- int retry = F81534A_ACCESS_REG_RETRY;
- int status;
-
- while (retry--) {
- status = usb_control_msg_send(dev,
- 0,
- F81232_REGISTER_REQUEST,
- F81232_SET_REGISTER,
- reg,
- 0,
- val,
- size,
- USB_CTRL_SET_TIMEOUT,
- GFP_KERNEL);
- if (status) {
- status = usb_translate_errors(status);
- if (status == -EIO)
- continue;
- }
-
- break;
- }
-
- if (status) {
- dev_err(&intf->dev, "failed to set register 0x%x: %d\n",
- reg, status);
- }
+ return usb_control_msg_send(interface_to_usbdev(intf),
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_SET_REGISTER,
+ reg,
+ 0,
+ val,
+ size,
+ USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
+}
- return status;
+static int f81534a_ctrl_get_register(struct usb_interface *intf, u16 reg,
+ u16 size, void *val)
+{
+ return usb_control_msg_recv(interface_to_usbdev(intf),
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_GET_REGISTER,
+ reg,
+ 0,
+ val,
+ size,
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
}
static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en)
@@ -869,6 +863,29 @@ static int f81534a_ctrl_enable_all_ports(struct usb_interface *intf, bool en)
* bit 0~11 : Serial port enable bit.
*/
if (en) {
+ /*
+ * The Fintek F81532A/534A/535/536 family relies on the
+ * F81534A_CTRL_CMD_ENABLE_PORT (116h) register during
+ * initialization to both determine serial port status and
+ * control port creation.
+ *
+ * If the driver experiences fast load/unload cycles, the
+ * device state may become unstable, resulting in the
+ * incomplete generation of serial ports.
+ *
+ * Performing a dummy read operation on the register prior
+ * to the initial write command resolves the issue.
+ *
+ * This clears the device's stale internal state. Subsequent
+ * write operations will correctly generate all serial ports.
+ */
+ status = f81534a_ctrl_get_register(intf,
+ F81534A_CTRL_CMD_ENABLE_PORT,
+ sizeof(enable),
+ enable);
+ if (status)
+ return status;
+
enable[0] = 0xff;
enable[1] = 0x8f;
}
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 67cb974ec761..5439e760a563 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -93,9 +93,14 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
{
u8 pin_assign = 0;
u32 conf;
+ u32 signal;
/* DP Signalling */
- conf = (dp->data.conf & DP_CONF_SIGNALLING_MASK) >> DP_CONF_SIGNALLING_SHIFT;
+ signal = DP_CAP_DP_SIGNALLING(dp->port->vdo) & DP_CAP_DP_SIGNALLING(dp->alt->vdo);
+ if (dp->plug_prime)
+ signal &= DP_CAP_DP_SIGNALLING(dp->plug_prime->vdo);
+
+ conf = signal << DP_CONF_SIGNALLING_SHIFT;
switch (con) {
case DP_STATUS_CON_DISABLED:
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8e97e5820eee..b0e6c58e6a59 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7697,7 +7697,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
- if (IS_ERR_OR_NULL(port->role_sw))
+ if (!port->role_sw)
port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
err = PTR_ERR(port->role_sw);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 4f75bc876454..b366192c77cf 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -12,6 +12,7 @@
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -30,7 +31,10 @@
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/security.h>
#include <linux/virtio_mmio.h>
+#include <linux/wait.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -46,6 +50,7 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>
+#include <xen/xenbus.h>
#ifdef CONFIG_XEN_ACPI
#include <xen/acpi.h>
#endif
@@ -68,10 +73,20 @@ module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
MODULE_PARM_DESC(dm_op_buf_max_size,
"Maximum size of a dm_op hypercall buffer");
+static bool unrestricted;
+module_param(unrestricted, bool, 0);
+MODULE_PARM_DESC(unrestricted,
+ "Don't restrict hypercalls to target domain if running in a domU");
+
struct privcmd_data {
domid_t domid;
};
+/* DOMID_INVALID implies no restriction */
+static domid_t target_domain = DOMID_INVALID;
+static bool restrict_wait;
+static DECLARE_WAIT_QUEUE_HEAD(restrict_wait_wq);
+
static int privcmd_vma_range_is_mapped(
struct vm_area_struct *vma,
unsigned long addr,
@@ -1582,13 +1597,16 @@ static long privcmd_ioctl(struct file *file,
static int privcmd_open(struct inode *ino, struct file *file)
{
- struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ struct privcmd_data *data;
+
+ if (wait_event_interruptible(restrict_wait_wq, !restrict_wait) < 0)
+ return -EINTR;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- /* DOMID_INVALID implies no restriction */
- data->domid = DOMID_INVALID;
+ data->domid = target_domain;
file->private_data = data;
return 0;
@@ -1681,6 +1699,52 @@ static struct miscdevice privcmd_dev = {
.fops = &xen_privcmd_fops,
};
+static int init_restrict(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ char *target;
+ unsigned int domid;
+
+ /* Default to a guaranteed unused domain-id. */
+ target_domain = DOMID_IDLE;
+
+ target = xenbus_read(XBT_NIL, "target", "", NULL);
+ if (IS_ERR(target) || kstrtouint(target, 10, &domid)) {
+ pr_err("No target domain found, blocking all hypercalls\n");
+ goto out;
+ }
+
+ target_domain = domid;
+
+ out:
+ if (!IS_ERR(target))
+ kfree(target);
+
+ restrict_wait = false;
+ wake_up_all(&restrict_wait_wq);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block xenstore_notifier = {
+ .notifier_call = init_restrict,
+};
+
+static void __init restrict_driver(void)
+{
+ if (unrestricted) {
+ if (security_locked_down(LOCKDOWN_XEN_USER_ACTIONS))
+ pr_warn("Kernel is locked down, parameter \"unrestricted\" ignored\n");
+ else
+ return;
+ }
+
+ restrict_wait = true;
+
+ register_xenstore_notifier(&xenstore_notifier);
+}
+
static int __init privcmd_init(void)
{
int err;
@@ -1688,6 +1752,9 @@ static int __init privcmd_init(void)
if (!xen_domain())
return -ENODEV;
+ if (!xen_initial_domain())
+ restrict_driver();
+
err = misc_register(&privcmd_dev);
if (err != 0) {
pr_err("Could not register Xen privcmd device\n");
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 6a3a16f91051..9fbff75e3faa 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -875,8 +875,10 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
inode_unlock(d_inode(root));
if (err) {
- if (f)
+ if (f) {
+ allow_write_access(f);
filp_close(f, NULL);
+ }
kfree(e);
return err;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fa4d22f6f29d..dea64839d2ca 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -498,28 +498,6 @@ static int btree_migrate_folio(struct address_space *mapping,
#define btree_migrate_folio NULL
#endif
-static int btree_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- int ret;
-
- if (wbc->sync_mode == WB_SYNC_NONE) {
- struct btrfs_fs_info *fs_info;
-
- if (wbc->for_kupdate)
- return 0;
-
- fs_info = inode_to_fs_info(mapping->host);
- /* this is a bit racy, but that's ok */
- ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH,
- fs_info->dirty_metadata_batch);
- if (ret < 0)
- return 0;
- }
- return btree_write_cache_pages(mapping, wbc);
-}
-
static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
if (folio_test_writeback(folio) || folio_test_dirty(folio))
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1e855c5854ce..2e8dc928621c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2088,8 +2088,7 @@ static int submit_eb_page(struct folio *folio, struct btrfs_eb_write_context *ct
return 1;
}
-int btree_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc)
+int btree_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct btrfs_eb_write_context ctx = { .wbc = wbc };
struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 039a73731135..c63ccfb9fc37 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -244,8 +244,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty);
int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
-int btree_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc);
+int btree_writepages(struct address_space *mapping, struct writeback_control *wbc);
void btrfs_readahead(struct readahead_control *rac);
int set_folio_extent_mapped(struct folio *folio);
int set_page_extent_mapped(struct page *page);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 09ebe5acbe43..4bff5d5953ed 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6358,6 +6358,25 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
int ret;
bool xa_reserved = false;
+ if (!args->orphan && !args->subvol) {
+ /*
+ * Before anything else, check if we can add the name to the
+ * parent directory. We want to avoid a dir item overflow in
+ * case we have an existing dir item due to existing name
+ * hash collisions. We do this check here before we call
+ * btrfs_add_link() down below so that we can avoid a
+ * transaction abort (which could be exploited by malicious
+ * users).
+ *
+ * For subvolumes we already do this in btrfs_mksubvol().
+ */
+ ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
+ btrfs_ino(BTRFS_I(dir)),
+ name);
+ if (ret < 0)
+ return ret;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index f15cbbb81660..72d73953d1b7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4097,6 +4097,25 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
goto out;
}
+ received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
+ BTRFS_UUID_SIZE);
+
+ /*
+ * Before we attempt to add the new received uuid, check if we have room
+ * for it in case there's already an item. If the size of the existing
+ * item plus this root's ID (u64) exceeds the maximum item size, we can
+ * return here without the need to abort a transaction. If we don't do
+ * this check, the btrfs_uuid_tree_add() call below would fail with
+ * -EOVERFLOW and result in a transaction abort. Malicious users could
+ * exploit this to turn the fs into RO mode.
+ */
+ if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
+ ret = btrfs_uuid_tree_check_overflow(fs_info, sa->uuid,
+ BTRFS_UUID_KEY_RECEIVED_SUBVOL);
+ if (ret < 0)
+ goto out;
+ }
+
/*
* 1 - root item
* 2 - uuid items (received uuid + subvol uuid)
@@ -4112,8 +4131,6 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
sa->rtime.sec = ct.tv_sec;
sa->rtime.nsec = ct.tv_nsec;
- received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
- BTRFS_UUID_SIZE);
if (received_uuid_changed &&
!btrfs_is_empty_uuid(root_item->received_uuid)) {
ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
@@ -4135,7 +4152,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
}
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index af19f7a3e74a..ada19b328861 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -2128,8 +2128,11 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
if (!btrfs_should_periodic_reclaim(space_info))
continue;
for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
- if (do_reclaim_sweep(space_info, raid))
+ if (do_reclaim_sweep(space_info, raid)) {
+ spin_lock(&space_info->lock);
btrfs_set_periodic_reclaim_ready(space_info, false);
+ spin_unlock(&space_info->lock);
+ }
}
}
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 0ee6b40af65e..0a147bc5f090 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1894,6 +1894,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
+ /*
+ * We are creating a lot of snapshots of the same root that was
+ * received (has a received UUID) and reached a leaf's limit for
+ * an item. We can safely ignore this and avoid a transaction
+ * abort. A deletion of this snapshot will still work since we
+ * ignore if an item with a BTRFS_UUID_KEY_RECEIVED_SUBVOL key
+ * is missing (see btrfs_delete_subvolume()). Send/receive will
+ * work too since it peeks the first root id from the existing
+ * item (it could peek any), and in case it's missing it
+ * falls back to search by BTRFS_UUID_KEY_SUBVOL keys.
+ * Creation of a snapshot does not require CAP_SYS_ADMIN, so
+ * we don't want users triggering transaction aborts, either
+ * intentionally or not.
+ */
+ if (ret == -EOVERFLOW)
+ ret = 0;
if (ret && ret != -EEXIST) {
btrfs_abort_transaction(trans, ret);
goto fail;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 60bba7fbeb35..7e9475e2a047 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1244,7 +1244,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
}
if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) {
generic_err(leaf, slot,
- "invalid root level, have %u expect [0, %u]",
+ "invalid root drop_level, have %u expect [0, %u]",
btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1);
return -EUCLEAN;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index fa1199fb6b3d..28dcf8a8997b 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5886,6 +5886,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
+ const bool orig_log_new_dentries = ctx->log_new_dentries;
int ret = 0;
/*
@@ -5947,7 +5948,11 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* dir index key range logged for the directory. So we
* must make sure the deletion is recorded.
*/
+ ctx->log_new_dentries = false;
ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
+ if (!ret && ctx->log_new_dentries)
+ ret = log_new_dir_dentries(trans, inode, ctx);
+
btrfs_add_delayed_iput(inode);
if (ret)
break;
@@ -5982,6 +5987,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
break;
}
+ ctx->log_new_dentries = orig_log_new_dentries;
ctx->logging_conflict_inodes = false;
if (ret)
free_conflicting_inodes(ctx);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index aca2861f2187..8e1b2e764e00 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -229,6 +229,44 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
return ret;
}
+/*
+ * Check if we can add one root ID to a UUID key.
+ * If the key does not yet exist, we can; otherwise only if the extended item
+ * does not exceed the maximum item size permitted by the leaf size.
+ *
+ * Returns 0 on success, negative value on error.
+ */
+int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info,
+ const u8 *uuid, u8 type)
+{
+ BTRFS_PATH_AUTO_FREE(path);
+ int ret;
+ u32 item_size;
+ struct btrfs_key key;
+
+ if (WARN_ON_ONCE(!fs_info->uuid_root))
+ return -EINVAL;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ btrfs_uuid_to_key(uuid, type, &key);
+ ret = btrfs_search_slot(NULL, fs_info->uuid_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
+
+ item_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+
+ if (sizeof(struct btrfs_item) + item_size + sizeof(u64) >
+ BTRFS_LEAF_DATA_SIZE(fs_info))
+ return -EOVERFLOW;
+
+ return 0;
+}
+
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
u64 subid)
{
diff --git a/fs/btrfs/uuid-tree.h b/fs/btrfs/uuid-tree.h
index c60ad20325cc..02b235a3653f 100644
--- a/fs/btrfs/uuid-tree.h
+++ b/fs/btrfs/uuid-tree.h
@@ -12,6 +12,8 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
u64 subid);
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid);
+int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info,
+ const u8 *uuid, u8 type);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 24b6a7f83f93..b723e860e4e9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6522,8 +6522,10 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
return PTR_ERR(map);
num_copies = btrfs_chunk_map_num_copies(map);
- if (io_geom.mirror_num > num_copies)
- return -EINVAL;
+ if (io_geom.mirror_num > num_copies) {
+ ret = -EINVAL;
+ goto out;
+ }
map_offset = logical - map->start;
io_geom.raid56_full_stripe_start = (u64)-1;
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 74297fd2a365..c79a11ce7a4c 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -79,7 +79,7 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
@@ -98,7 +98,7 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ba960a9bfbc8..a0850916d353 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1330,6 +1330,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = d_inode(dentry);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
struct dentry *dn;
@@ -1354,7 +1355,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
if (!dn) {
try_async = false;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
@@ -1415,7 +1416,19 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
* We have enough caps, so we assume that the unlink
* will succeed. Fix up the target inode and dcache.
*/
- drop_nlink(inode);
+
+ /*
+ * Protect the i_nlink update with i_ceph_lock
+ * to prevent racing against ceph_fill_inode()
+ * handling our completion on a worker thread
+ * and don't decrement if i_nlink has already
+ * been updated to zero by this completion.
+ */
+ spin_lock(&ci->i_ceph_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&ci->i_ceph_lock);
+
d_delete(dentry);
} else {
spin_lock(&fsc->async_unlink_conflict_lock);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 9e3122db3d78..b21bd6b84c49 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -397,7 +397,7 @@ int ceph_open(struct inode *inode, struct file *file)
if (!dentry) {
do_sync = true;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
@@ -807,7 +807,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if (!dn) {
try_async = false;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ead51d9e019b..92ea6085957c 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2546,7 +2546,7 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
if (!dentry) {
do_sync = true;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 0476501d70ba..7188378df704 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2766,6 +2766,7 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
if (ret < 0) {
dput(parent);
dput(cur);
+ __putname(path);
return ERR_PTR(ret);
}
@@ -2775,6 +2776,7 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
if (len < 0) {
dput(parent);
dput(cur);
+ __putname(path);
return ERR_PTR(len);
}
}
@@ -2811,6 +2813,7 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
* cannot ever succeed. Creating paths that long is
* possible with Ceph, but Linux cannot use them.
*/
+ __putname(path);
return ERR_PTR(-ENAMETOOLONG);
}
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 7116f20a7fbe..6e369d136eb8 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -787,6 +787,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
struct super_block *sb = fe->inode->i_sb;
erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
struct z_erofs_pcluster *pcl = NULL;
+ void *ptr = NULL;
int ret;
DBG_BUGON(fe->pcl);
@@ -807,6 +808,14 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
DBG_BUGON(1);
return -EFSCORRUPTED;
+ } else {
+ ptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
+ if (IS_ERR(ptr)) {
+ erofs_err(sb, "failed to read inline data %pe @ pa %llu of nid %llu",
+ ptr, map->m_pa, EROFS_I(fe->inode)->nid);
+ return PTR_ERR(ptr);
+ }
+ ptr = map->buf.page;
}
if (pcl) {
@@ -836,16 +845,8 @@ static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe)
/* bind cache first when cached decompression is preferred */
z_erofs_bind_cache(fe);
} else {
- void *mptr;
-
- mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
- if (IS_ERR(mptr)) {
- ret = PTR_ERR(mptr);
- erofs_err(sb, "failed to get inline data %d", ret);
- return ret;
- }
- get_page(map->buf.page);
- WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+ get_page((struct page *)ptr);
+ WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, ptr);
fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK;
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
}
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 70bb5ded4fd6..cda7952526aa 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -23,20 +23,18 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
-static void *page_array_alloc(struct inode *inode, int nr)
+static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (likely(size <= sbi->page_array_slab_size))
return f2fs_kmem_cache_alloc(sbi->page_array_slab,
- GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
+ GFP_F2FS_ZERO, false, sbi);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
-static void page_array_free(struct inode *inode, void *pages, int nr)
+static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (!pages)
@@ -147,13 +145,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;
- cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
- page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+ page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -213,13 +211,13 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
dic->rbuf, &dic->rlen);
if (ret != LZO_E_OK) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lzo decompress failed, ret:%d", ret);
return -EIO;
}
if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lzo invalid rlen:%zu, expected:%lu",
dic->rlen, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -293,13 +291,13 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
dic->clen, dic->rlen);
if (ret < 0) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lz4 decompress failed, ret:%d", ret);
return -EIO;
}
if (ret != PAGE_SIZE << dic->log_cluster_size) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"lz4 invalid ret:%d, expected:%lu",
ret, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -427,7 +425,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
if (!stream) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"%s zstd_init_dstream failed", __func__);
vfree(workspace);
return -EIO;
@@ -463,14 +461,14 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
if (zstd_is_error(ret)) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"%s zstd_decompress_stream failed, ret: %d",
__func__, zstd_get_error_code(ret));
return -EIO;
}
if (dic->rlen != outbuf.pos) {
- f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+ f2fs_err_ratelimited(dic->sbi,
"%s ZSTD invalid rlen:%zu, expected:%lu",
__func__, dic->rlen,
PAGE_SIZE << dic->log_cluster_size);
@@ -616,6 +614,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
static int f2fs_compress_pages(struct compress_ctx *cc)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -636,7 +635,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
cc->valid_nr_cpages = cc->nr_cpages;
- cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
+ cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -711,7 +710,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -729,7 +728,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+ struct f2fs_sb_info *sbi = dic->sbi;
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -799,7 +798,7 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
- struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+ struct f2fs_sb_info *sbi = dic->sbi;
dec_page_count(sbi, F2FS_RD_DATA);
@@ -1325,7 +1324,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
- cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ cic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
@@ -1427,13 +1426,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc, false);
return 0;
out_destroy_crypt:
- page_array_free(cc->inode, cic->rpages, cc->cluster_size);
+ page_array_free(sbi, cic->rpages, cc->cluster_size);
for (--i; i >= 0; i--) {
if (!cc->cpages[i])
@@ -1454,7 +1453,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
return -EAGAIN;
}
@@ -1484,7 +1483,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
- page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
+ page_array_free(sbi, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}
@@ -1616,14 +1615,13 @@ static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
bool pre_alloc)
{
- const struct f2fs_compress_ops *cops =
- f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
int i;
- if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
return 0;
- dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
+ dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
if (!dic->tpages)
return -ENOMEM;
@@ -1653,10 +1651,9 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
bool bypass_destroy_callback, bool pre_alloc)
{
- const struct f2fs_compress_ops *cops =
- f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+ const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
- if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+ if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
return;
if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
@@ -1683,7 +1680,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);
- dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+ dic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1691,6 +1688,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode;
+ dic->sbi = sbi;
+ dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
atomic_set(&dic->remaining_pages, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size;
@@ -1704,7 +1703,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
- dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
+ dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
if (!dic->cpages) {
ret = -ENOMEM;
goto out_free;
@@ -1734,6 +1733,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
bool bypass_destroy_callback)
{
int i;
+ /* use sbi in dic to avoid UAF of dic->inode */
+ struct f2fs_sb_info *sbi = dic->sbi;
f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
@@ -1745,7 +1746,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- page_array_free(dic->inode, dic->tpages, dic->cluster_size);
+ page_array_free(sbi, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@@ -1754,10 +1755,10 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+ page_array_free(sbi, dic->cpages, dic->nr_cpages);
}
- page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+ page_array_free(sbi, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}
@@ -1776,8 +1777,7 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
f2fs_free_dic(dic, false);
} else {
INIT_WORK(&dic->free_work, f2fs_late_free_dic);
- queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
- &dic->free_work);
+ queue_work(dic->sbi->post_read_wq, &dic->free_work);
}
}
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 998ac993543e..a6b06ac2751d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1525,6 +1525,7 @@ struct compress_io_ctx {
struct decompress_io_ctx {
u32 magic; /* magic number to indicate page is compressed */
struct inode *inode; /* inode the context belong to */
+ struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
pgoff_t cluster_idx; /* cluster index number */
unsigned int cluster_size; /* page count in cluster */
unsigned int log_cluster_size; /* log of cluster size */
@@ -1565,6 +1566,7 @@ struct decompress_io_ctx {
bool failed; /* IO error occurred before decompression? */
bool need_verity; /* need fs-verity verification after decompression? */
+ unsigned char compress_algorithm; /* backup algorithm type */
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 67a93d45ad3a..6e09b80c0c37 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1805,6 +1805,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
GET_SUM_BLOCK(sbi, segno));
f2fs_put_page(sum_page, 0);
+ if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
+ f2fs_err(sbi, "%s: segment %u is used by log",
+ __func__, segno);
+ f2fs_bug_on(sbi, 1);
+ goto skip;
+ }
+
if (get_valid_blocks(sbi, segno, false) == 0)
goto freed;
if (gc_type == BG_GC && __is_large_section(sbi) &&
@@ -1815,7 +1822,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
sum = page_address(sum_page);
if (type != GET_SUM_TYPE((&sum->footer))) {
- f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
+ f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SIT and SSA",
segno, type, GET_SUM_TYPE((&sum->footer)));
f2fs_stop_checkpoint(sbi, false,
STOP_CP_REASON_CORRUPTED_SUMMARY);
@@ -2079,6 +2086,13 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
+ /*
+ * avoid migrating empty section, as it can be allocated by
+ * log in parallel.
+ */
+ if (!get_valid_blocks(sbi, segno, true))
+ continue;
+
if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, segno)))
continue;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 397c96c25c31..0178292c1864 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1879,18 +1879,19 @@ static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
WARN_ON_ONCE(!folio->private && map_len < dirty_len);
switch (wpc->iomap.type) {
- case IOMAP_INLINE:
- WARN_ON_ONCE(1);
- error = -EIO;
- break;
- case IOMAP_HOLE:
- break;
- default:
+ case IOMAP_UNWRITTEN:
+ case IOMAP_MAPPED:
error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
end_pos, map_len);
if (!error)
(*count)++;
break;
+ case IOMAP_HOLE:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ break;
}
dirty_len -= map_len;
pos += map_len;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 88b0fb343ae0..b02ea9fc812d 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -393,8 +393,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
- if (d_alias)
+ if (d_alias) {
+ if (d_is_dir(d_alias)) {
+ status = -EISDIR;
+ goto out_dput;
+ }
dentry = d_alias;
+ }
/* When we created the file with exclusive semantics, make
* sure we set the attributes afterwards. */
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b7bdb9b44440..8471371c6888 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -5809,9 +5809,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
- so->so_replay.rp_buflen = len;
- read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
+ if (len <= NFSD4_REPLAY_ISIZE) {
+ so->so_replay.rp_buflen = len;
+ read_bytes_from_xdr_buf(xdr->buf,
+ op_status_offset + XDR_UNIT,
so->so_replay.rp_buf, len);
+ } else {
+ so->so_replay.rp_buflen = 0;
+ }
}
status:
op->status = nfsd4_map_status(op->status,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index d80be50dad76..6b37d939a6ce 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -152,9 +152,19 @@ static int exports_net_open(struct net *net, struct file *file)
seq = file->private_data;
seq->private = nn->svc_export_cache;
+ get_net(net);
return 0;
}
+static int exports_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct cache_detail *cd = seq->private;
+
+ put_net(cd->net);
+ return seq_release(inode, file);
+}
+
static int exports_nfsd_open(struct inode *inode, struct file *file)
{
return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -164,7 +174,7 @@ static const struct file_operations exports_nfsd_operations = {
.open = exports_nfsd_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = exports_release,
};
static int export_features_show(struct seq_file *m, void *v)
@@ -1456,7 +1466,7 @@ static const struct proc_ops exports_proc_ops = {
.proc_open = exports_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
- .proc_release = seq_release,
+ .proc_release = exports_release,
};
static int create_proc_exports_entry(void)
@@ -2081,7 +2091,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
}
ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0,
- get_current_cred());
+ current_cred());
/* always save the latest error */
if (ret < 0)
err = ret;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 609487d35959..5b3a4473ed0a 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -473,11 +473,18 @@ struct nfs4_client_reclaim {
struct xdr_netobj cr_princhash;
};
-/* A reasonable value for REPLAY_ISIZE was estimated as follows:
- * The OPEN response, typically the largest, requires
- * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
- * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
- * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
+/*
+ * REPLAY_ISIZE is sized for an OPEN response with delegation:
+ * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) +
+ * 8(verifier) + 4(deleg. type) + 8(deleg. stateid) +
+ * 4(deleg. recall flag) + 20(deleg. space limit) +
+ * ~32(deleg. ace) = 112 bytes
+ *
+ * Some responses can exceed this. A LOCK denial includes the conflicting
+ * lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses
+ * larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is
+ * saved. Enlarging this constant increases the size of every
+ * nfs4_stateowner.
*/
#define NFSD4_REPLAY_ISIZE 112
diff --git a/fs/nsfs.c b/fs/nsfs.c
index c675fc40ce2d..0f4b0fed9265 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -12,6 +12,7 @@
#include <linux/user_namespace.h>
#include <linux/nsfs.h>
#include <linux/uaccess.h>
+#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include "mount.h"
@@ -152,6 +153,23 @@ static int copy_ns_info_to_user(const struct mnt_namespace *mnt_ns,
return 0;
}
+static bool may_see_all_namespaces(void)
+{
+ return (task_active_pid_ns(current) == &init_pid_ns) &&
+ ns_capable_noaudit(init_pid_ns.user_ns, CAP_SYS_ADMIN);
+}
+
+static bool may_use_nsfs_ioctl(unsigned int cmd)
+{
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(NS_MNT_GET_NEXT):
+ fallthrough;
+ case _IOC_NR(NS_MNT_GET_PREV):
+ return may_see_all_namespaces();
+ }
+ return true;
+}
+
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -165,6 +183,9 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
uid_t uid;
int ret;
+ if (!may_use_nsfs_ioctl(ioctl))
+ return -EPERM;
+
switch (ioctl) {
case NS_GET_USERNS:
return open_related_ns(ns, ns_get_owner);
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 7c61c1e944c7..179aaf7674c7 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -24,6 +24,7 @@
#include <linux/iov_iter.h>
#include "../common/arc4.h"
#include <crypto/aead.h>
+#include <crypto/utils.h>
static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2)
@@ -257,7 +258,7 @@ int cifs_verify_signature(struct smb_rqst *rqst,
/* cifs_dump_mem("what we think it should be: ",
what_we_think_sig_should_be, 16); */
- if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
+ if (crypto_memneq(server_response_sig, what_we_think_sig_should_be, 8))
return -EACCES;
else
return 0;
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 1187b0240a44..ce3af62ddaf4 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -291,10 +291,14 @@ static void cifs_kill_sb(struct super_block *sb)
/*
* We need to release all dentries for the cached directories
- * before we kill the sb.
+ * and close all deferred file handles before we kill the sb.
*/
if (cifs_sb->root) {
close_all_cached_dirs(cifs_sb);
+ cifs_close_all_deferred_files_sb(cifs_sb);
+
+ /* Wait for all pending oplock breaks to complete */
+ flush_workqueue(cifsoplockd_wq);
/* finally release root dentry */
dput(cifs_sb->root);
@@ -799,7 +803,6 @@ static void cifs_umount_begin(struct super_block *sb)
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_close_all_deferred_files(tcon);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
if (tcon->ses && tcon->ses->server) {
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 6a35e884b41f..dcfadbc20723 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -20,6 +20,7 @@
#include <linux/utsname.h>
#include <linux/sched/mm.h>
#include <linux/netfs.h>
+#include <linux/fcntl.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
@@ -2317,4 +2318,14 @@ static inline bool cifs_ses_exiting(struct cifs_ses *ses)
return ret;
}
+static inline int cifs_open_create_options(unsigned int oflags, int opts)
+{
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (oflags & O_SYNC)
+ opts |= CREATE_WRITE_THROUGH;
+ if (oflags & O_DIRECT)
+ opts |= CREATE_NO_BUFFER;
+ return opts;
+}
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index b59647291363..d2d004764a70 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -298,6 +298,7 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+void cifs_close_all_deferred_files_sb(struct cifs_sb_info *cifs_sb);
void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
struct dentry *dentry);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 823b4e1ce2d0..d801e0288a6d 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1906,6 +1906,10 @@ static int match_session(struct cifs_ses *ses,
case Kerberos:
if (!uid_eq(ctx->cred_uid, ses->cred_uid))
return 0;
+ if (strncmp(ses->user_name ?: "",
+ ctx->username ?: "",
+ CIFS_MAX_USERNAME_LEN))
+ return 0;
break;
case NTLMv2:
case RawNTLMSSP:
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 1822493dd084..6bb0e98462a9 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -304,6 +304,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
goto out;
}
+ create_options |= cifs_open_create_options(oflags, create_options);
/*
* if we're not using unix extensions, see if we need to set
* ATTR_READONLY on the create call
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 166dc8fd06c0..8cf3cb61336c 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -570,15 +570,8 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
*********************************************************************/
disposition = cifs_get_disposition(f_flags);
-
/* BB pass O_SYNC flag through on file attributes .. BB */
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
+ create_options |= cifs_open_create_options(f_flags, create_options);
retry_open:
oparms = (struct cifs_open_parms) {
@@ -697,8 +690,6 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
mutex_init(&cfile->fh_mutex);
spin_lock_init(&cfile->file_info_lock);
- cifs_sb_active(inode->i_sb);
-
/*
* If the server returned a read oplock and we have mandatory brlocks,
* set oplock level to None.
@@ -753,7 +744,6 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
struct inode *inode = d_inode(cifs_file->dentry);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsLockInfo *li, *tmp;
- struct super_block *sb = inode->i_sb;
/*
* Delete any outstanding lock records. We'll lose them when the file
@@ -771,7 +761,6 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry);
- cifs_sb_deactive(sb);
kfree(cifs_file->symlink_target);
kfree(cifs_file);
}
@@ -1228,13 +1217,8 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
rdwr_for_fscache = 1;
desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (cfile->f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (cfile->f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
+ create_options |= cifs_open_create_options(cfile->f_flags,
+ create_options);
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
@@ -3087,12 +3071,6 @@ void cifs_oplock_break(struct work_struct *work)
__u64 persistent_fid, volatile_fid;
__u16 net_fid;
- /*
- * Hold a reference to the superblock to prevent it and its inodes from
- * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
- * may release the last reference to the sb and trigger inode eviction.
- */
- cifs_sb_active(sb);
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@@ -3165,7 +3143,6 @@ void cifs_oplock_break(struct work_struct *work)
cifs_put_tlink(tlink);
out:
cifs_done_oplock_break(cinode);
- cifs_sb_deactive(sb);
}
static int cifs_swap_activate(struct swap_info_struct *sis,
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index ee9c95811477..39e48e86b9b4 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1847,7 +1847,7 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->backupuid_specified = false; /* no backup intent for a user */
ctx->backupgid_specified = false; /* no backup intent for a group */
- ctx->retrans = 1;
+ ctx->retrans = 0;
ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
/*
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 3f3f184f7fb9..6abfa22f4c17 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -27,6 +27,11 @@
#include "fs_context.h"
#include "cached_dir.h"
+struct tcon_list {
+ struct list_head entry;
+ struct cifs_tcon *tcon;
+};
+
/* The xid serves as a useful identifier for each incoming vfs request,
in a similar way to the mid which is useful to track each sent smb,
and CurrentXid can also provide a running counter (although it
@@ -830,6 +835,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
}
}
+void cifs_close_all_deferred_files_sb(struct cifs_sb_info *cifs_sb)
+{
+ struct rb_root *root = &cifs_sb->tlink_tree;
+ struct rb_node *node;
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+ struct tcon_list *tmp_list, *q;
+ LIST_HEAD(tcon_head);
+
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ tcon = tlink_tcon(tlink);
+ if (IS_ERR(tcon))
+ continue;
+ tmp_list = kmalloc(sizeof(struct tcon_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->tcon = tcon;
+ /* Take a reference on tcon to prevent it from being freed */
+ spin_lock(&tcon->tc_lock);
+ ++tcon->tc_count;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_close_defer_files);
+ spin_unlock(&tcon->tc_lock);
+ list_add_tail(&tmp_list->entry, &tcon_head);
+ }
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ list_for_each_entry_safe(tmp_list, q, &tcon_head, entry) {
+ cifs_close_all_deferred_files(tmp_list->tcon);
+ list_del(&tmp_list->entry);
+ cifs_put_tcon(tmp_list->tcon, netfs_trace_tcon_ref_put_close_defer_files);
+ kfree(tmp_list);
+ }
+}
+
void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
struct dentry *dentry)
{
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index ab7e4c9f556f..44001b1ab79b 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -628,6 +628,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
struct iface_info_ipv6 *p6;
struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
struct cifs_server_iface tmp_iface;
+ __be16 port;
ssize_t bytes_left;
size_t next = 0;
int nb_iface = 0;
@@ -662,6 +663,15 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
goto out;
}
+ spin_lock(&ses->server->srv_lock);
+ if (ses->server->dstaddr.ss_family == AF_INET)
+ port = ((struct sockaddr_in *)&ses->server->dstaddr)->sin_port;
+ else if (ses->server->dstaddr.ss_family == AF_INET6)
+ port = ((struct sockaddr_in6 *)&ses->server->dstaddr)->sin6_port;
+ else
+ port = cpu_to_be16(CIFS_PORT);
+ spin_unlock(&ses->server->srv_lock);
+
while (bytes_left >= (ssize_t)sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface));
/* default to 1Gbps when link speed is unset */
@@ -682,7 +692,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
- addr4->sin_port = cpu_to_be16(CIFS_PORT);
+ addr4->sin_port = port;
cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
&addr4->sin_addr);
@@ -696,7 +706,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
addr6->sin6_flowinfo = 0;
addr6->sin6_scope_id = 0;
- addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+ addr6->sin6_port = port;
cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
&addr6->sin6_addr);
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index b6821815248e..380890702088 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -5163,7 +5163,10 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
- rqst.rq_nvec = n_vec + 1;
+ /* iov[0] is the SMB header; move payload to rq_iter for encryption safety */
+ rqst.rq_nvec = 1;
+ iov_iter_kvec(&rqst.rq_iter, ITER_SOURCE, &iov[1], n_vec,
+ io_parms->length);
if (retries)
smb2_set_replay(server, &rqst);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 475b36c27f65..87f189894b1e 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -19,6 +19,7 @@
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <crypto/aead.h>
+#include <crypto/utils.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
@@ -732,7 +733,8 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (rc)
return rc;
- if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) {
+ if (crypto_memneq(server_response_sig, shdr->Signature,
+ SMB2_SIGNATURE_SIZE)) {
cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n",
shdr->Command, shdr->MessageId);
return -EACCES;
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index fb9c631e0ec1..547d1e5d000a 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -47,6 +47,7 @@
EM(netfs_trace_tcon_ref_get_cached_laundromat, "GET Ch-Lau") \
EM(netfs_trace_tcon_ref_get_cached_lease_break, "GET Ch-Lea") \
EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
+ EM(netfs_trace_tcon_ref_get_close_defer_files, "GET Cl-Def") \
EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \
@@ -58,6 +59,7 @@
EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
+ EM(netfs_trace_tcon_ref_put_close_defer_files, "PUT Cl-Def") \
EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \
EM(netfs_trace_tcon_ref_put_dfs_refer, "PUT DfsRfr") \
EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \
diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
index cabe6a843c6a..da0187e76cf1 100644
--- a/fs/smb/server/Kconfig
+++ b/fs/smb/server/Kconfig
@@ -11,6 +11,7 @@ config SMB_SERVER
select CRYPTO_HMAC
select CRYPTO_ECB
select CRYPTO_LIB_DES
+ select CRYPTO_LIB_UTILS
select CRYPTO_SHA256
select CRYPTO_CMAC
select CRYPTO_SHA512
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index b3d121052408..c12dcb0a47dd 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -13,6 +13,7 @@
#include <linux/xattr.h>
#include <crypto/hash.h>
#include <crypto/aead.h>
+#include <crypto/utils.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
@@ -283,7 +284,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
goto out;
}
- if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+ if (crypto_memneq(ntlmv2->ntlmv2_hash, ntlmv2_rsp,
+ CIFS_HMAC_MD5_HASH_SIZE))
rc = -EINVAL;
out:
if (ctx)
@@ -801,12 +803,8 @@ static int generate_smb3signingkey(struct ksmbd_session *sess,
if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
- ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
+ ksmbd_debug(AUTH, "generated SMB3 signing key\n");
ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
- ksmbd_debug(AUTH, "Session Key %*ph\n",
- SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
- ksmbd_debug(AUTH, "Signing Key %*ph\n",
- SMB3_SIGN_KEY_SIZE, key);
return 0;
}
@@ -870,23 +868,9 @@ static int generate_smb3encryptionkey(struct ksmbd_conn *conn,
if (rc)
return rc;
- ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
+ ksmbd_debug(AUTH, "generated SMB3 encryption/decryption keys\n");
ksmbd_debug(AUTH, "Cipher type %d\n", conn->cipher_type);
ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
- ksmbd_debug(AUTH, "Session Key %*ph\n",
- SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
- if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
- conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
- ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
- SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
- ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
- SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
- } else {
- ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
- SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
- ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
- SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
- }
return 0;
}
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index a04d5702820d..228166c47d8c 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -120,7 +120,7 @@ static void free_lease(struct oplock_info *opinfo)
kfree(lease);
}
-static void free_opinfo(struct oplock_info *opinfo)
+static void __free_opinfo(struct oplock_info *opinfo)
{
if (opinfo->is_lease)
free_lease(opinfo);
@@ -129,6 +129,18 @@ static void free_opinfo(struct oplock_info *opinfo)
kfree(opinfo);
}
+static void free_opinfo_rcu(struct rcu_head *rcu)
+{
+ struct oplock_info *opinfo = container_of(rcu, struct oplock_info, rcu);
+
+ __free_opinfo(opinfo);
+}
+
+static void free_opinfo(struct oplock_info *opinfo)
+{
+ call_rcu(&opinfo->rcu, free_opinfo_rcu);
+}
+
struct oplock_info *opinfo_get(struct ksmbd_file *fp)
{
struct oplock_info *opinfo;
@@ -176,9 +188,9 @@ void opinfo_put(struct oplock_info *opinfo)
free_opinfo(opinfo);
}
-static void opinfo_add(struct oplock_info *opinfo)
+static void opinfo_add(struct oplock_info *opinfo, struct ksmbd_file *fp)
{
- struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+ struct ksmbd_inode *ci = fp->f_ci;
down_write(&ci->m_lock);
list_add(&opinfo->op_entry, &ci->m_op_list);
@@ -1123,10 +1135,12 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
rcu_read_lock();
opinfo = rcu_dereference(fp->f_opinfo);
- rcu_read_unlock();
- if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2)
+ if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2) {
+ rcu_read_unlock();
return;
+ }
+ rcu_read_unlock();
p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
if (!p_ci)
@@ -1277,20 +1291,21 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
set_oplock_level(opinfo, req_op_level, lctx);
out:
- rcu_assign_pointer(fp->f_opinfo, opinfo);
- opinfo->o_fp = fp;
-
opinfo_count_inc(fp);
- opinfo_add(opinfo);
+ opinfo_add(opinfo, fp);
+
if (opinfo->is_lease) {
err = add_lease_global_list(opinfo);
if (err)
goto err_out;
}
+ rcu_assign_pointer(fp->f_opinfo, opinfo);
+ opinfo->o_fp = fp;
+
return 0;
err_out:
- free_opinfo(opinfo);
+ __free_opinfo(opinfo);
return err;
}
diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
index 9a56eaadd0dd..921e3199e4df 100644
--- a/fs/smb/server/oplock.h
+++ b/fs/smb/server/oplock.h
@@ -69,8 +69,9 @@ struct oplock_info {
struct lease *o_lease;
struct list_head op_entry;
struct list_head lease_entry;
- wait_queue_head_t oplock_q; /* Other server threads */
- wait_queue_head_t oplock_brk; /* oplock breaking wait */
+ wait_queue_head_t oplock_q; /* Other server threads */
+ wait_queue_head_t oplock_brk; /* oplock breaking wait */
+ struct rcu_head rcu;
};
struct lease_break_info {
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 0d7ba57c1ca6..63c092328752 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -4,6 +4,7 @@
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
+#include <crypto/utils.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/syscalls.h>
@@ -123,6 +124,8 @@ int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
pr_err("The first operation in the compound does not have tcon\n");
return -EINVAL;
}
+ if (work->tcon->t_state != TREE_CONNECTED)
+ return -ENOENT;
if (tree_id != UINT_MAX && work->tcon->id != tree_id) {
pr_err("tree id(%u) is different with id(%u) in first operation\n",
tree_id, work->tcon->id);
@@ -1953,6 +1956,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
}
}
smb2_set_err_rsp(work);
+ conn->binding = false;
} else {
unsigned int iov_len;
@@ -2831,7 +2835,11 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
- dh_info->fp->conn = conn;
+ if (dh_info->fp->conn) {
+ ksmbd_put_durable_fd(dh_info->fp);
+ err = -EBADF;
+ goto out;
+ }
dh_info->reconnected = true;
goto out;
}
@@ -3012,13 +3020,14 @@ int smb2_open(struct ksmbd_work *work)
goto err_out2;
}
+ fp = dh_info.fp;
+
if (ksmbd_override_fsids(work)) {
rc = -ENOMEM;
ksmbd_put_durable_fd(dh_info.fp);
goto err_out2;
}
- fp = dh_info.fp;
file_info = FILE_OPENED;
rc = ksmbd_vfs_getattr(&fp->filp->f_path, &stat);
@@ -3614,10 +3623,8 @@ int smb2_open(struct ksmbd_work *work)
reconnected_fp:
rsp->StructureSize = cpu_to_le16(89);
- rcu_read_lock();
- opinfo = rcu_dereference(fp->f_opinfo);
+ opinfo = opinfo_get(fp);
rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
- rcu_read_unlock();
rsp->Flags = 0;
rsp->CreateAction = cpu_to_le32(file_info);
rsp->CreationTime = cpu_to_le64(fp->create_time);
@@ -3658,6 +3665,7 @@ int smb2_open(struct ksmbd_work *work)
next_ptr = &lease_ccontext->Next;
next_off = conn->vals->create_lease_size;
}
+ opinfo_put(opinfo);
if (maximal_access_ctxt) {
struct create_context *mxac_ccontext;
@@ -5436,7 +5444,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct smb2_query_info_req *req,
struct smb2_query_info_rsp *rsp)
{
- struct ksmbd_session *sess = work->sess;
struct ksmbd_conn *conn = work->conn;
struct ksmbd_share_config *share = work->tcon->share_conf;
int fsinfoclass = 0;
@@ -5566,10 +5573,11 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info = (struct object_id_info *)(rsp->Buffer);
- if (!user_guest(sess->user))
- memcpy(info->objid, user_passkey(sess->user), 16);
+ if (path.mnt->mnt_sb->s_uuid_len == 16)
+ memcpy(info->objid, path.mnt->mnt_sb->s_uuid.b,
+ path.mnt->mnt_sb->s_uuid_len);
else
- memset(info->objid, 0, 16);
+ memcpy(info->objid, &stfs.f_fsid, sizeof(stfs.f_fsid));
info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
info->extended_info.version = cpu_to_le32(1);
@@ -6089,14 +6097,14 @@ static int smb2_create_link(struct ksmbd_work *work,
rc = -EINVAL;
ksmbd_debug(SMB, "cannot delete %s\n",
link_name);
- goto out;
}
} else {
rc = -EEXIST;
ksmbd_debug(SMB, "link already exists\n");
- goto out;
}
ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+ if (rc)
+ goto out;
}
rc = ksmbd_vfs_link(work, target_name, link_name);
if (rc)
@@ -8825,7 +8833,7 @@ int smb2_check_sign_req(struct ksmbd_work *work)
signature))
return 0;
- if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+ if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
pr_err("bad smb2 signature\n");
return 0;
}
@@ -8913,7 +8921,7 @@ int smb3_check_sign_req(struct ksmbd_work *work)
if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature))
return 0;
- if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+ if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
pr_err("bad smb2 signature\n");
return 0;
}
diff --git a/fs/tests/exec_kunit.c b/fs/tests/exec_kunit.c
index f412d1a0f6bb..1c32cac098cf 100644
--- a/fs/tests/exec_kunit.c
+++ b/fs/tests/exec_kunit.c
@@ -94,9 +94,6 @@ static const struct bprm_stack_limits_result bprm_stack_limits_results[] = {
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)),
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
- { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * + sizeof(void *)),
- .argc = 0, .envc = 0 },
- .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
{ { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM,
.argc = 0, .envc = 0 },
.expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 2cd212ad2c1d..4b99834fcae8 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -809,7 +809,7 @@ xfs_defer_can_append(
/* Paused items cannot absorb more work */
if (dfp->dfp_flags & XFS_DEFER_PAUSED)
- return NULL;
+ return false;
/* Already full? */
if (ops->max_items && dfp->dfp_count >= ops->max_items)
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index a93686df6f67..321009741e58 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -1720,7 +1720,6 @@ xrep_agi(
{
struct xrep_agi *ragi;
struct xfs_mount *mp = sc->mp;
- char *descr;
unsigned int i;
int error;
@@ -1754,17 +1753,13 @@ xrep_agi(
xagino_bitmap_init(&ragi->iunlink_bmp);
sc->buf_cleanup = xrep_agi_buf_cleanup;
- descr = xchk_xfile_ag_descr(sc, "iunlinked next pointers");
- error = xfarray_create(descr, 0, sizeof(xfs_agino_t),
- &ragi->iunlink_next);
- kfree(descr);
+ error = xfarray_create("iunlinked next pointers", 0,
+ sizeof(xfs_agino_t), &ragi->iunlink_next);
if (error)
return error;
- descr = xchk_xfile_ag_descr(sc, "iunlinked prev pointers");
- error = xfarray_create(descr, 0, sizeof(xfs_agino_t),
- &ragi->iunlink_prev);
- kfree(descr);
+ error = xfarray_create("iunlinked prev pointers", 0,
+ sizeof(xfs_agino_t), &ragi->iunlink_prev);
if (error)
return error;
diff --git a/fs/xfs/scrub/alloc_repair.c b/fs/xfs/scrub/alloc_repair.c
index b43b45fec4f5..1fbe9c8740fa 100644
--- a/fs/xfs/scrub/alloc_repair.c
+++ b/fs/xfs/scrub/alloc_repair.c
@@ -849,7 +849,6 @@ xrep_allocbt(
{
struct xrep_abt *ra;
struct xfs_mount *mp = sc->mp;
- char *descr;
int error;
/* We require the rmapbt to rebuild anything. */
@@ -875,11 +874,9 @@ xrep_allocbt(
}
/* Set up enough storage to handle maximally fragmented free space. */
- descr = xchk_xfile_ag_descr(sc, "free space records");
- error = xfarray_create(descr, mp->m_sb.sb_agblocks / 2,
+ error = xfarray_create("free space records", mp->m_sb.sb_agblocks / 2,
sizeof(struct xfs_alloc_rec_incore),
&ra->free_records);
- kfree(descr);
if (error)
goto out_ra;
diff --git a/fs/xfs/scrub/attr_repair.c b/fs/xfs/scrub/attr_repair.c
index 50ab476dddb9..dd24044c44ef 100644
--- a/fs/xfs/scrub/attr_repair.c
+++ b/fs/xfs/scrub/attr_repair.c
@@ -1531,7 +1531,6 @@ xrep_xattr_setup_scan(
struct xrep_xattr **rxp)
{
struct xrep_xattr *rx;
- char *descr;
int max_len;
int error;
@@ -1557,35 +1556,26 @@ xrep_xattr_setup_scan(
goto out_rx;
/* Set up some staging for salvaged attribute keys and values */
- descr = xchk_xfile_ino_descr(sc, "xattr keys");
- error = xfarray_create(descr, 0, sizeof(struct xrep_xattr_key),
+ error = xfarray_create("xattr keys", 0, sizeof(struct xrep_xattr_key),
&rx->xattr_records);
- kfree(descr);
if (error)
goto out_rx;
- descr = xchk_xfile_ino_descr(sc, "xattr names");
- error = xfblob_create(descr, &rx->xattr_blobs);
- kfree(descr);
+ error = xfblob_create("xattr names", &rx->xattr_blobs);
if (error)
goto out_keys;
if (xfs_has_parent(sc->mp)) {
ASSERT(sc->flags & XCHK_FSGATES_DIRENTS);
- descr = xchk_xfile_ino_descr(sc,
- "xattr retained parent pointer entries");
- error = xfarray_create(descr, 0,
+ error = xfarray_create("xattr parent pointer entries", 0,
sizeof(struct xrep_xattr_pptr),
&rx->pptr_recs);
- kfree(descr);
if (error)
goto out_values;
- descr = xchk_xfile_ino_descr(sc,
- "xattr retained parent pointer names");
- error = xfblob_create(descr, &rx->pptr_names);
- kfree(descr);
+ error = xfblob_create("xattr parent pointer names",
+ &rx->pptr_names);
if (error)
goto out_pprecs;
diff --git a/fs/xfs/scrub/bmap_repair.c b/fs/xfs/scrub/bmap_repair.c
index 4505f4829d53..9688b0aa1b8f 100644
--- a/fs/xfs/scrub/bmap_repair.c
+++ b/fs/xfs/scrub/bmap_repair.c
@@ -800,7 +800,6 @@ xrep_bmap(
bool allow_unwritten)
{
struct xrep_bmap *rb;
- char *descr;
xfs_extnum_t max_bmbt_recs;
bool large_extcount;
int error = 0;
@@ -822,11 +821,8 @@ xrep_bmap(
/* Set up enough storage to handle the max records for this fork. */
large_extcount = xfs_has_large_extent_counts(sc->mp);
max_bmbt_recs = xfs_iext_max_nextents(large_extcount, whichfork);
- descr = xchk_xfile_ino_descr(sc, "%s fork mapping records",
- whichfork == XFS_DATA_FORK ? "data" : "attr");
- error = xfarray_create(descr, max_bmbt_recs,
+ error = xfarray_create("fork mapping records", max_bmbt_recs,
sizeof(struct xfs_bmbt_rec), &rb->bmap_records);
- kfree(descr);
if (error)
goto out_rb;
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index eb00d48590f2..bd74ac23b72e 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -201,24 +201,6 @@ static inline bool xchk_could_repair(const struct xfs_scrub *sc)
int xchk_metadata_inode_forks(struct xfs_scrub *sc);
-/*
- * Helper macros to allocate and format xfile description strings.
- * Callers must kfree the pointer returned.
- */
-#define xchk_xfile_descr(sc, fmt, ...) \
- kasprintf(XCHK_GFP_FLAGS, "XFS (%s): " fmt, \
- (sc)->mp->m_super->s_id, ##__VA_ARGS__)
-#define xchk_xfile_ag_descr(sc, fmt, ...) \
- kasprintf(XCHK_GFP_FLAGS, "XFS (%s): AG 0x%x " fmt, \
- (sc)->mp->m_super->s_id, \
- (sc)->sa.pag ? (sc)->sa.pag->pag_agno : (sc)->sm->sm_agno, \
- ##__VA_ARGS__)
-#define xchk_xfile_ino_descr(sc, fmt, ...) \
- kasprintf(XCHK_GFP_FLAGS, "XFS (%s): inode 0x%llx " fmt, \
- (sc)->mp->m_super->s_id, \
- (sc)->ip ? (sc)->ip->i_ino : (sc)->sm->sm_ino, \
- ##__VA_ARGS__)
-
/*
* Setting up a hook to wait for intents to drain is costly -- we have to take
* the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index bf9199e8df63..eba6799dd183 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -1094,22 +1094,17 @@ xchk_directory(
sd->xname.name = sd->namebuf;
if (xfs_has_parent(sc->mp)) {
- char *descr;
-
/*
* Set up some staging memory for dirents that we can't check
* due to locking contention.
*/
- descr = xchk_xfile_ino_descr(sc, "slow directory entries");
- error = xfarray_create(descr, 0, sizeof(struct xchk_dirent),
- &sd->dir_entries);
- kfree(descr);
+ error = xfarray_create("slow directory entries", 0,
+ sizeof(struct xchk_dirent), &sd->dir_entries);
if (error)
goto out_sd;
- descr = xchk_xfile_ino_descr(sc, "slow directory entry names");
- error = xfblob_create(descr, &sd->dir_names);
- kfree(descr);
+ error = xfblob_create("slow directory entry names",
+ &sd->dir_names);
if (error)
goto out_entries;
}
diff --git a/fs/xfs/scrub/dir_repair.c b/fs/xfs/scrub/dir_repair.c
index a9e2aa5c3968..29c48c4db53b 100644
--- a/fs/xfs/scrub/dir_repair.c
+++ b/fs/xfs/scrub/dir_repair.c
@@ -1782,20 +1782,15 @@ xrep_dir_setup_scan(
struct xrep_dir *rd)
{
struct xfs_scrub *sc = rd->sc;
- char *descr;
int error;
/* Set up some staging memory for salvaging dirents. */
- descr = xchk_xfile_ino_descr(sc, "directory entries");
- error = xfarray_create(descr, 0, sizeof(struct xrep_dirent),
- &rd->dir_entries);
- kfree(descr);
+ error = xfarray_create("directory entries", 0,
+ sizeof(struct xrep_dirent), &rd->dir_entries);
if (error)
return error;
- descr = xchk_xfile_ino_descr(sc, "directory entry names");
- error = xfblob_create(descr, &rd->dir_names);
- kfree(descr);
+ error = xfblob_create("directory entry names", &rd->dir_names);
if (error)
goto out_xfarray;
diff --git a/fs/xfs/scrub/dirtree.c b/fs/xfs/scrub/dirtree.c
index 73d40a2600a2..709b91d023b7 100644
--- a/fs/xfs/scrub/dirtree.c
+++ b/fs/xfs/scrub/dirtree.c
@@ -96,7 +96,6 @@ xchk_setup_dirtree(
struct xfs_scrub *sc)
{
struct xchk_dirtree *dl;
- char *descr;
int error;
xchk_fsgates_enable(sc, XCHK_FSGATES_DIRENTS);
@@ -120,16 +119,12 @@ xchk_setup_dirtree(
mutex_init(&dl->lock);
- descr = xchk_xfile_ino_descr(sc, "dirtree path steps");
- error = xfarray_create(descr, 0, sizeof(struct xchk_dirpath_step),
- &dl->path_steps);
- kfree(descr);
+ error = xfarray_create("dirtree path steps", 0,
+ sizeof(struct xchk_dirpath_step), &dl->path_steps);
if (error)
goto out_dl;
- descr = xchk_xfile_ino_descr(sc, "dirtree path names");
- error = xfblob_create(descr, &dl->path_names);
- kfree(descr);
+ error = xfblob_create("dirtree path names", &dl->path_names);
if (error)
goto out_steps;
diff --git a/fs/xfs/scrub/ialloc_repair.c b/fs/xfs/scrub/ialloc_repair.c
index 6cb2c9d11509..6a2b248f5536 100644
--- a/fs/xfs/scrub/ialloc_repair.c
+++ b/fs/xfs/scrub/ialloc_repair.c
@@ -804,7 +804,6 @@ xrep_iallocbt(
{
struct xrep_ibt *ri;
struct xfs_mount *mp = sc->mp;
- char *descr;
xfs_agino_t first_agino, last_agino;
int error = 0;
@@ -823,11 +822,9 @@ xrep_iallocbt(
/* Set up enough storage to handle an AG with nothing but inodes. */
xfs_agino_range(mp, sc->sa.pag->pag_agno, &first_agino, &last_agino);
last_agino /= XFS_INODES_PER_CHUNK;
- descr = xchk_xfile_ag_descr(sc, "inode index records");
- error = xfarray_create(descr, last_agino,
+ error = xfarray_create("inode index records", last_agino,
sizeof(struct xfs_inobt_rec_incore),
&ri->inode_records);
- kfree(descr);
if (error)
goto out_ri;
diff --git a/fs/xfs/scrub/nlinks.c b/fs/xfs/scrub/nlinks.c
index 7e917a5e9ca2..d386ee9e1364 100644
--- a/fs/xfs/scrub/nlinks.c
+++ b/fs/xfs/scrub/nlinks.c
@@ -995,7 +995,6 @@ xchk_nlinks_setup_scan(
struct xchk_nlink_ctrs *xnc)
{
struct xfs_mount *mp = sc->mp;
- char *descr;
unsigned long long max_inos;
xfs_agnumber_t last_agno = mp->m_sb.sb_agcount - 1;
xfs_agino_t first_agino, last_agino;
@@ -1012,10 +1011,9 @@ xchk_nlinks_setup_scan(
*/
xfs_agino_range(mp, last_agno, &first_agino, &last_agino);
max_inos = XFS_AGINO_TO_INO(mp, last_agno, last_agino) + 1;
- descr = xchk_xfile_descr(sc, "file link counts");
- error = xfarray_create(descr, min(XFS_MAXINUMBER + 1, max_inos),
+ error = xfarray_create("file link counts",
+ min(XFS_MAXINUMBER + 1, max_inos),
sizeof(struct xchk_nlink), &xnc->nlinks);
- kfree(descr);
if (error)
goto out_teardown;
diff --git a/fs/xfs/scrub/orphanage.c b/fs/xfs/scrub/orphanage.c
index 7148d8362db8..46171f61eda4 100644
--- a/fs/xfs/scrub/orphanage.c
+++ b/fs/xfs/scrub/orphanage.c
@@ -443,6 +443,11 @@ xrep_adoption_check_dcache(
return 0;
d_child = d_hash_and_lookup(d_orphanage, &qname);
+ if (IS_ERR(d_child)) {
+ dput(d_orphanage);
+ return PTR_ERR(d_child);
+ }
+
if (d_child) {
trace_xrep_adoption_check_child(sc->mp, d_child);
@@ -480,7 +485,7 @@ xrep_adoption_zap_dcache(
return;
d_child = d_hash_and_lookup(d_orphanage, &qname);
- while (d_child != NULL) {
+ while (!IS_ERR_OR_NULL(d_child)) {
trace_xrep_adoption_invalidate_child(sc->mp, d_child);
ASSERT(d_is_negative(d_child));
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index 91e7b51ce068..a82d1fb67716 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -733,7 +733,6 @@ xchk_parent_pptr(
struct xfs_scrub *sc)
{
struct xchk_pptrs *pp;
- char *descr;
int error;
pp = kvzalloc(sizeof(struct xchk_pptrs), XCHK_GFP_FLAGS);
@@ -746,16 +745,12 @@ xchk_parent_pptr(
* Set up some staging memory for parent pointers that we can't check
* due to locking contention.
*/
- descr = xchk_xfile_ino_descr(sc, "slow parent pointer entries");
- error = xfarray_create(descr, 0, sizeof(struct xchk_pptr),
- &pp->pptr_entries);
- kfree(descr);
+ error = xfarray_create("slow parent pointer entries", 0,
+ sizeof(struct xchk_pptr), &pp->pptr_entries);
if (error)
goto out_pp;
- descr = xchk_xfile_ino_descr(sc, "slow parent pointer names");
- error = xfblob_create(descr, &pp->pptr_names);
- kfree(descr);
+ error = xfblob_create("slow parent pointer names", &pp->pptr_names);
if (error)
goto out_entries;
diff --git a/fs/xfs/scrub/parent_repair.c b/fs/xfs/scrub/parent_repair.c
index 7b42b7f65a0b..66ce96319201 100644
--- a/fs/xfs/scrub/parent_repair.c
+++ b/fs/xfs/scrub/parent_repair.c
@@ -1476,7 +1476,6 @@ xrep_parent_setup_scan(
struct xrep_parent *rp)
{
struct xfs_scrub *sc = rp->sc;
- char *descr;
struct xfs_da_geometry *geo = sc->mp->m_attr_geo;
int max_len;
int error;
@@ -1504,32 +1503,22 @@ xrep_parent_setup_scan(
goto out_xattr_name;
/* Set up some staging memory for logging parent pointer updates. */
- descr = xchk_xfile_ino_descr(sc, "parent pointer entries");
- error = xfarray_create(descr, 0, sizeof(struct xrep_pptr),
- &rp->pptr_recs);
- kfree(descr);
+ error = xfarray_create("parent pointer entries", 0,
+ sizeof(struct xrep_pptr), &rp->pptr_recs);
if (error)
goto out_xattr_value;
- descr = xchk_xfile_ino_descr(sc, "parent pointer names");
- error = xfblob_create(descr, &rp->pptr_names);
- kfree(descr);
+ error = xfblob_create("parent pointer names", &rp->pptr_names);
if (error)
goto out_recs;
/* Set up some storage for copying attrs before the mapping exchange */
- descr = xchk_xfile_ino_descr(sc,
- "parent pointer retained xattr entries");
- error = xfarray_create(descr, 0, sizeof(struct xrep_parent_xattr),
- &rp->xattr_records);
- kfree(descr);
+ error = xfarray_create("parent pointer xattr entries", 0,
+ sizeof(struct xrep_parent_xattr), &rp->xattr_records);
if (error)
goto out_names;
- descr = xchk_xfile_ino_descr(sc,
- "parent pointer retained xattr values");
- error = xfblob_create(descr, &rp->xattr_blobs);
- kfree(descr);
+ error = xfblob_create("parent pointer xattr values", &rp->xattr_blobs);
if (error)
goto out_attr_keys;
diff --git a/fs/xfs/scrub/quotacheck.c b/fs/xfs/scrub/quotacheck.c
index c77eb2de8df7..28309f9f25e4 100644
--- a/fs/xfs/scrub/quotacheck.c
+++ b/fs/xfs/scrub/quotacheck.c
@@ -741,7 +741,6 @@ xqcheck_setup_scan(
struct xfs_scrub *sc,
struct xqcheck *xqc)
{
- char *descr;
struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
unsigned long long max_dquots = XFS_DQ_ID_MAX + 1ULL;
int error;
@@ -756,28 +755,22 @@ xqcheck_setup_scan(
error = -ENOMEM;
if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_USER)) {
- descr = xchk_xfile_descr(sc, "user dquot records");
- error = xfarray_create(descr, max_dquots,
+ error = xfarray_create("user dquot records", max_dquots,
sizeof(struct xqcheck_dquot), &xqc->ucounts);
- kfree(descr);
if (error)
goto out_teardown;
}
if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_GROUP)) {
- descr = xchk_xfile_descr(sc, "group dquot records");
- error = xfarray_create(descr, max_dquots,
+ error = xfarray_create("group dquot records", max_dquots,
sizeof(struct xqcheck_dquot), &xqc->gcounts);
- kfree(descr);
if (error)
goto out_teardown;
}
if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_PROJ)) {
- descr = xchk_xfile_descr(sc, "project dquot records");
- error = xfarray_create(descr, max_dquots,
+ error = xfarray_create("project dquot records", max_dquots,
sizeof(struct xqcheck_dquot), &xqc->pcounts);
- kfree(descr);
if (error)
goto out_teardown;
}
diff --git a/fs/xfs/scrub/refcount_repair.c b/fs/xfs/scrub/refcount_repair.c
index a00d7ce7ae5b..1512e7363de1 100644
--- a/fs/xfs/scrub/refcount_repair.c
+++ b/fs/xfs/scrub/refcount_repair.c
@@ -123,13 +123,7 @@ int
xrep_setup_ag_refcountbt(
struct xfs_scrub *sc)
{
- char *descr;
- int error;
-
- descr = xchk_xfile_ag_descr(sc, "rmap record bag");
- error = xrep_setup_xfbtree(sc, descr);
- kfree(descr);
- return error;
+ return xrep_setup_xfbtree(sc, "rmap record bag");
}
/* Check for any obvious conflicts with this shared/CoW staging extent. */
@@ -705,7 +699,6 @@ xrep_refcountbt(
{
struct xrep_refc *rr;
struct xfs_mount *mp = sc->mp;
- char *descr;
int error;
/* We require the rmapbt to rebuild anything. */
@@ -718,11 +711,9 @@ xrep_refcountbt(
rr->sc = sc;
/* Set up enough storage to handle one refcount record per block. */
- descr = xchk_xfile_ag_descr(sc, "reference count records");
- error = xfarray_create(descr, mp->m_sb.sb_agblocks,
+ error = xfarray_create("reference count records", mp->m_sb.sb_agblocks,
sizeof(struct xfs_refcount_irec),
&rr->refcount_records);
- kfree(descr);
if (error)
goto out_rr;
diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c
index e8080eba37d2..436dcc3fe854 100644
--- a/fs/xfs/scrub/rmap_repair.c
+++ b/fs/xfs/scrub/rmap_repair.c
@@ -161,14 +161,11 @@ xrep_setup_ag_rmapbt(
struct xfs_scrub *sc)
{
struct xrep_rmap *rr;
- char *descr;
int error;
xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP);
- descr = xchk_xfile_ag_descr(sc, "reverse mapping records");
- error = xrep_setup_xfbtree(sc, descr);
- kfree(descr);
+ error = xrep_setup_xfbtree(sc, "reverse mapping records");
if (error)
return error;
diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c
index 7c7366c98338..746990ae0ec1 100644
--- a/fs/xfs/scrub/rtsummary.c
+++ b/fs/xfs/scrub/rtsummary.c
@@ -42,7 +42,6 @@ xchk_setup_rtsummary(
struct xfs_scrub *sc)
{
struct xfs_mount *mp = sc->mp;
- char *descr;
struct xchk_rtsummary *rts;
int error;
@@ -62,10 +61,8 @@ xchk_setup_rtsummary(
* Create an xfile to construct a new rtsummary file. The xfile allows
* us to avoid pinning kernel memory for this purpose.
*/
- descr = xchk_xfile_descr(sc, "realtime summary file");
- error = xfile_create(descr, XFS_FSB_TO_B(mp, mp->m_rsumblocks),
- &sc->xfile);
- kfree(descr);
+ error = xfile_create("realtime summary file",
+ XFS_FSB_TO_B(mp, mp->m_rsumblocks), &sc->xfile);
if (error)
return error;
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 35a8c1b8b3cb..5f10a61c9b25 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -237,7 +237,8 @@ xfs_bmap_update_diff_items(
struct xfs_bmap_intent *ba = bi_entry(a);
struct xfs_bmap_intent *bb = bi_entry(b);
- return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
+ return ((ba->bi_owner->i_ino > bb->bi_owner->i_ino) -
+ (ba->bi_owner->i_ino < bb->bi_owner->i_ino));
}
/* Log bmap updates in the intent item. */
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 0d73b59f1c9e..0f75adbd0c6b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1472,9 +1472,15 @@ xfs_qm_dqflush(
return 0;
out_abort:
+ /*
+ * Shut down the log before removing the dquot item from the AIL.
+ * Otherwise, the log tail may advance past this item's LSN while
+ * log writes are still in progress, making these unflushed changes
+ * unrecoverable on the next mount.
+ */
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
xfs_trans_ail_delete(lip, 0);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
xfs_dqfunlock(dqp);
return error;
}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a4c00decd97b..bf9245b1b4c6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1396,6 +1396,8 @@ xlog_alloc_log(
if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
+ else if (mp->m_sb.sb_logsectsize > 0)
+ log->l_iclog_roundoff = mp->m_sb.sb_logsectsize;
else
log->l_iclog_roundoff = BBSIZE;
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 0cc66f8d28e7..c1b7ec524464 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -216,6 +216,25 @@ const volatile void * __must_check_fn(const volatile void *val)
#define return_ptr(p) return no_free_ptr(p)
+/*
+ * Only for situations where an allocation is handed in to another function
+ * and consumed by that function on success.
+ *
+ * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ * setup(f);
+ * if (some_condition)
+ * return -EINVAL;
+ * ....
+ * ret = bar(f);
+ * if (!ret)
+ * retain_and_null_ptr(f);
+ * return ret;
+ *
+ * After retain_and_null_ptr(f) the variable f is NULL and cannot be
+ * dereferenced anymore.
+ */
+#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL))
/*
* DEFINE_CLASS(name, type, exit, init, init_args...):
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index ecf203f01034..a3ae683affa5 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -42,7 +42,8 @@ extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
-int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f70b048596b5..bd34b70dd0cf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -7,6 +7,7 @@
#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>
+#include <linux/secretmem.h>
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -262,6 +263,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
inode = vma->vm_file->f_inode;
+ if (secretmem_mapping(inode->i_mapping))
+ return false;
+
return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 8a9792a6427a..47a0feffc121 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -37,7 +37,8 @@ static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_inner_mac_header(skb);
}
-int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr);
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index f18f50d39598..9c61e3b96628 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -467,6 +467,7 @@ enum {
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
+ REQ_F_BUF_MORE_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -547,6 +548,8 @@ enum {
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
/* buffer ring head needs incrementing on put */
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+ /* incremental buffer consumption, more space available */
+ REQ_F_BUF_MORE = IO_REQ_FLAG(REQ_F_BUF_MORE_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fa711f80957b..25f51bf3c351 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -694,6 +694,9 @@ extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif
+/* Disable or mask interrupts during a kernel kexec */
+extern void machine_kexec_mask_interrupts(void);
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 70c0948f978e..0225121f3013 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -394,6 +394,7 @@
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4)
+#define GITS_TYPER_IDBITS GENMASK_ULL(12, 8)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 2b1a816e4d59..6ea35c8ce00f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -12282,7 +12282,9 @@ struct mlx5_ifc_mtrc_ctrl_bits {
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
- u8 reserved_at_8[0x7];
+ u8 reserved_at_8[0x5];
+ u8 host_pf_not_exist[0x1];
+ u8 reserved_at_14[0x1];
u8 host_pf_disabled[0x1];
u8 host_num_of_vfs[0x10];
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 8fc2b328ec4d..68068ba57569 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -423,14 +423,12 @@ struct mmc_host {
struct mmc_ios ios; /* current io bus settings */
+ bool claimed; /* host exclusively claimed */
+
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
- unsigned int claimed:1; /* host exclusively claimed */
unsigned int doing_init_tune:1; /* initial tuning in progress */
- unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
- unsigned int retune_now:1; /* do re-tuning at next req */
- unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
@@ -438,6 +436,9 @@ struct mmc_host {
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
+ bool can_retune; /* re-tuning can be used */
+ bool retune_now; /* do re-tuning at next req */
+ bool retune_paused; /* re-tuning is temporarily disabled */
int need_retune; /* re-tuning is needed */
int hold_retune; /* hold off re-tuning */
unsigned int retune_period; /* re-tuning period in secs */
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 11be70a7929f..2f4243b61a52 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -253,6 +253,24 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
+/* virtual device features */
+#define MASTER_UPPER_DEV_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+#define MASTER_UPPER_DEV_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_PARTIAL)
+
+#define MASTER_UPPER_DEV_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_GSO_SOFTWARE)
+
+#define MASTER_UPPER_DEV_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+#define MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP)
+
static inline netdev_features_t netdev_base_features(netdev_features_t features)
{
features &= ~NETIF_F_ONE_FOR_ALL;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 77a99c8ab01c..fcc1509ca7cb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -308,7 +308,9 @@ struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
- int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
+ int (*parse)(const struct sk_buff *skb,
+ const struct net_device *dev,
+ unsigned char *haddr);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
@@ -3163,7 +3165,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
if (!dev->header_ops || !dev->header_ops->parse)
return 0;
- return dev->header_ops->parse(skb, haddr);
+ return dev->header_ops->parse(skb, dev, haddr);
}
static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
@@ -3256,18 +3258,51 @@ struct softnet_data {
};
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+DECLARE_PER_CPU(struct page_pool *, system_page_pool);
+
+#define XMIT_RECURSION_LIMIT 8
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
+
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
#else
static inline int dev_recursion_level(void)
{
return current->net_xmit.recursion;
}
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ current->net_xmit.recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ current->net_xmit.recursion--;
+}
#endif
void __netif_schedule(struct Qdisc *q);
@@ -4993,6 +5028,7 @@ static inline netdev_features_t netdev_add_tso_features(netdev_features_t featur
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
diff --git a/include/linux/security.h b/include/linux/security.h
index 2ec8f3014757..2c6db949ad1a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -140,6 +140,7 @@ enum lockdown_reason {
LOCKDOWN_BPF_WRITE_USER,
LOCKDOWN_DBG_WRITE_KERNEL,
LOCKDOWN_RTAS_ERROR_INJECTION,
+ LOCKDOWN_XEN_USER_ACTIONS,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d79ff252cfdc..366dc49c7b4a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -268,7 +268,6 @@ struct plat_stmmacenet_data {
int int_snapshot_num;
int msi_mac_vec;
int msi_wol_vec;
- int msi_lpi_vec;
int msi_sfty_ce_vec;
int msi_sfty_ue_vec;
int msi_rx_base_vec;
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index ae04054a1be3..e6ca052b2a85 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -34,6 +34,13 @@ enum {
TRACE_INTERNAL_SIRQ_BIT,
TRACE_INTERNAL_TRANSITION_BIT,
+ /* Internal event use recursion bits */
+ TRACE_INTERNAL_EVENT_BIT,
+ TRACE_INTERNAL_EVENT_NMI_BIT,
+ TRACE_INTERNAL_EVENT_IRQ_BIT,
+ TRACE_INTERNAL_EVENT_SIRQ_BIT,
+ TRACE_INTERNAL_EVENT_TRANSITION_BIT,
+
TRACE_BRANCH_BIT,
/*
* Abuse of the trace_recursion.
@@ -58,6 +65,8 @@ enum {
#define TRACE_LIST_START TRACE_INTERNAL_BIT
+#define TRACE_EVENT_START TRACE_INTERNAL_EVENT_BIT
+
#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
/*
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index d0cb0e02cd6a..34be2d579045 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -146,6 +146,7 @@ extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
extern void uprobe_handle_trampoline(struct pt_regs *regs);
extern void *arch_uprobe_trampoline(unsigned long *psize);
extern unsigned long uprobe_get_trampoline_vaddr(void);
+extern unsigned long arch_uprobe_get_xol_area(void);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
diff --git a/include/linux/usb.h b/include/linux/usb.h
index e76e3515a1da..2c0827cce489 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1830,14 +1830,18 @@ void usb_free_coherent(struct usb_device *dev, size_t size,
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
+/* Maximum value allowed for timeout in synchronous routines below */
+#define USB_MAX_SYNCHRONOUS_TIMEOUT 60000 /* ms */
+
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
void *data, __u16 size, int timeout);
extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout);
extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
- void *data, int len, int *actual_length,
- int timeout);
+ void *data, int len, int *actual_length, int timeout);
+extern int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout);
/* wrappers around usb_control_msg() for the most common standard requests */
int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 4bc6bb01a0eb..b593473d8d32 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -130,6 +130,7 @@ struct driver_info {
#define FLAG_MULTI_PACKET 0x2000
#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
#define FLAG_NOARP 0x8000 /* device can't do ARP */
+#define FLAG_NOMAXMTU 0x10000 /* allow max_mtu above hard_mtu */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 877f9b270cf6..a1f730d9f01b 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,6 +296,7 @@ struct dsa_port {
struct devlink_port devlink_port;
struct phylink *pl;
struct phylink_config pl_config;
+ netdevice_tracker conduit_tracker;
struct dsa_lag *lag;
struct net_device *hsr_dev;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 399592405c72..17913fca5445 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -156,6 +156,18 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
{
int pkt_len, err;
+ if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) {
+ if (dev) {
+ net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
+ dev->name);
+ DEV_STATS_INC(dev, tx_errors);
+ }
+ kfree_skb(skb);
+ return;
+ }
+
+ dev_xmit_recursion_inc();
+
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
pkt_len = skb->len - skb_inner_network_offset(skb);
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
@@ -165,6 +177,8 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
pkt_len = -1;
iptunnel_xmit_stats(dev, pkt_len);
}
+
+ dev_xmit_recursion_dec();
}
#endif
#endif
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 1f92cc7fdbd2..0a5556ef1672 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -24,6 +24,13 @@
#include <net/ip6_route.h>
#endif
+/* Recursion limit for tunnel xmit to detect routing loops.
+ * Unlike XMIT_RECURSION_LIMIT (8) used in the no-qdisc path, tunnel
+ * recursion involves route lookups and full IP output, consuming much
+ * more stack per level, so a lower limit is needed.
+ */
+#define IP_TUNNEL_RECURSION_LIMIT 4
+
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 79296ed87b9b..36964b86d336 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -873,6 +873,8 @@ struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set,
u64 timeout, u64 expiration, gfp_t gfp);
int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[]);
+void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ struct nft_set_elem_expr *elem_expr);
void nft_set_elem_destroy(const struct nft_set *set,
const struct nft_elem_priv *elem_priv,
bool destroy_expr);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 75a0d6095d2e..d3e1f91f81cd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -696,6 +696,34 @@ void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+
+static inline void dev_reset_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ struct Qdisc *qdisc;
+ bool nolock;
+
+ qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ if (!qdisc)
+ return;
+
+ nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+ if (nolock)
+ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+
+ qdisc_reset(qdisc);
+
+ spin_unlock_bh(qdisc_lock(qdisc));
+ if (nolock) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
+ spin_unlock_bh(&qdisc->seqlock);
+ }
+}
+
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
void *type_data);
@@ -1378,6 +1406,11 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block);
+static inline bool mini_qdisc_pair_inited(struct mini_Qdisc_pair *miniqp)
+{
+ return !!miniqp->p_miniq;
+}
+
void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index c8fa11ebb397..c7650f7de0ff 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -32,6 +32,7 @@ struct tcf_gate_params {
s32 tcfg_clockid;
size_t num_entries;
struct list_head entries;
+ struct rcu_head rcu;
};
#define GATE_ACT_GATE_OPEN BIT(0)
@@ -39,7 +40,7 @@ struct tcf_gate_params {
struct tcf_gate {
struct tc_action common;
- struct tcf_gate_params param;
+ struct tcf_gate_params __rcu *param;
u8 current_gate_status;
ktime_t current_close_time;
u32 current_entry_octets;
@@ -60,47 +61,65 @@ static inline bool is_tcf_gate(const struct tc_action *a)
return false;
}
+static inline struct tcf_gate_params *tcf_gate_params_locked(const struct tc_action *a)
+{
+ struct tcf_gate *gact = to_gate(a);
+
+ return rcu_dereference_protected(gact->param,
+ lockdep_is_held(&gact->tcf_lock));
+}
+
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
s32 tcfg_prio;
- tcfg_prio = to_gate(a)->param.tcfg_priority;
+ p = tcf_gate_params_locked(a);
+ tcfg_prio = p->tcfg_priority;
return tcfg_prio;
}
static inline u64 tcf_gate_basetime(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u64 tcfg_basetime;
- tcfg_basetime = to_gate(a)->param.tcfg_basetime;
+ p = tcf_gate_params_locked(a);
+ tcfg_basetime = p->tcfg_basetime;
return tcfg_basetime;
}
static inline u64 tcf_gate_cycletime(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u64 tcfg_cycletime;
- tcfg_cycletime = to_gate(a)->param.tcfg_cycletime;
+ p = tcf_gate_params_locked(a);
+ tcfg_cycletime = p->tcfg_cycletime;
return tcfg_cycletime;
}
static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u64 tcfg_cycletimeext;
- tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext;
+ p = tcf_gate_params_locked(a);
+ tcfg_cycletimeext = p->tcfg_cycletime_ext;
return tcfg_cycletimeext;
}
static inline u32 tcf_gate_num_entries(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u32 num_entries;
- num_entries = to_gate(a)->param.num_entries;
+ p = tcf_gate_params_locked(a);
+ num_entries = p->num_entries;
return num_entries;
}
@@ -114,7 +133,7 @@ static inline struct action_gate_entry
u32 num_entries;
int i = 0;
- p = &to_gate(a)->param;
+ p = tcf_gate_params_locked(a);
num_entries = p->num_entries;
list_for_each_entry(entry, &p->entries, list)
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index a93dc51f6323..6e2c5c77031f 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -47,7 +47,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
{
- return 0;
+ return -EPFNOSUPPORT;
}
#endif
diff --git a/include/net/xdp.h b/include/net/xdp.h
index b80953f0affb..05be2de2fd47 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -356,6 +356,38 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
+int xdp_reg_page_pool(struct page_pool *pool);
+void xdp_unreg_page_pool(const struct page_pool *pool);
+void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
+ const struct page_pool *pool);
+
+/**
+ * xdp_rxq_info_attach_mem_model - attach registered mem info to RxQ info
+ * @xdp_rxq: XDP RxQ info to attach the memory info to
+ * @mem: already registered memory info
+ *
+ * If the driver registers its memory providers manually, it must use this
+ * function instead of xdp_rxq_info_reg_mem_model().
+ */
+static inline void
+xdp_rxq_info_attach_mem_model(struct xdp_rxq_info *xdp_rxq,
+ const struct xdp_mem_info *mem)
+{
+ xdp_rxq->mem = *mem;
+}
+
+/**
+ * xdp_rxq_info_detach_mem_model - detach registered mem info from RxQ info
+ * @xdp_rxq: XDP RxQ info to detach the memory info from
+ *
+ * If the driver registers its memory providers manually and then attaches it
+ * via xdp_rxq_info_attach_mem_model(), it must call this function before
+ * xdp_rxq_info_unreg().
+ */
+static inline void xdp_rxq_info_detach_mem_model(struct xdp_rxq_info *xdp_rxq)
+{
+ xdp_rxq->mem = (struct xdp_mem_info){ };
+}
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index b37eb0a7060f..abfa3bdc3e11 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -397,7 +397,13 @@ TRACE_EVENT(rss_stat,
TP_fast_assign(
__entry->mm_id = mm_ptr_to_hash(mm);
- __entry->curr = !!(current->mm == mm);
+ /*
+ * curr is true if the mm matches the current task's mm_struct.
+ * Since kthreads (PF_KTHREAD) have no mm_struct of their own
+ * but can borrow one via kthread_use_mm(), we must filter them
+ * out to avoid incorrectly attributing the RSS update to them.
+ */
+ __entry->curr = current->mm == mm && !(current->flags & PF_KTHREAD);
__entry->member = member;
__entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member])
<< PAGE_SHIFT);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index eea3769765ac..c6cae9456698 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -274,6 +274,7 @@
EM(rxrpc_call_put_kernel, "PUT kernel ") \
EM(rxrpc_call_put_poke, "PUT poke ") \
EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_recvmsg_peek_nowait, "PUT peek-nwt") \
EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \
@@ -291,6 +292,9 @@
EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
EM(rxrpc_call_see_input, "SEE input ") \
EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_call_see_recvmsg_requeue, "SEE recv-rqu") \
+ EM(rxrpc_call_see_recvmsg_requeue_first, "SEE recv-rqF") \
+ EM(rxrpc_call_see_recvmsg_requeue_move, "SEE recv-rqM") \
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
diff --git a/init/Kconfig b/init/Kconfig
index 219ccdb0af73..f4b91b1857bf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -104,7 +104,10 @@ config GCC_ASM_GOTO_OUTPUT_BROKEN
config CC_HAS_ASM_GOTO_OUTPUT
def_bool y
depends on !GCC_ASM_GOTO_OUTPUT_BROKEN
+ # Detect basic support
depends on $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
+ # Detect clang (< v17) scoped label issues
+ depends on $(success,echo 'void b(void **);void* c(void);int f(void){{asm goto(""::::l0);return 0;l0:return 1;}void *x __attribute__((cleanup(b)))=c();{asm goto(""::::l1);return 2;l1:return 3;}}' | $(CC) -x c - -c -o /dev/null)
config CC_HAS_ASM_GOTO_TIED_OUTPUT
depends on CC_HAS_ASM_GOTO_OUTPUT
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 9bd27deeee6f..590973c47cd3 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -62,9 +62,17 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
buf = req->kbuf;
bl = io_buffer_get_list(ctx, buf->bgid);
- list_add(&buf->list, &bl->buf_list);
- req->flags &= ~REQ_F_BUFFER_SELECTED;
+ /*
+ * If the buffer list was upgraded to a ring-based one, or removed,
+ * while the request was in-flight in io-wq, drop it.
+ */
req->buf_index = buf->bgid;
+ if (bl && !(bl->flags & IOBL_BUF_RING))
+ list_add(&buf->list, &bl->buf_list);
+ else
+ kmem_cache_free(io_buf_cachep, buf);
+ req->flags &= ~REQ_F_BUFFER_SELECTED;
+ req->kbuf = NULL;
io_ring_submit_unlock(ctx, issue_flags);
return true;
@@ -167,7 +175,8 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
* the transfer completes (or if we get -EAGAIN and must poll of
* retry).
*/
- io_kbuf_commit(req, bl, *len, 1);
+ if (!io_kbuf_commit(req, bl, *len, 1))
+ req->flags |= REQ_F_BUF_MORE;
req->buf_list = NULL;
}
return ret;
@@ -313,7 +322,8 @@ int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
*/
if (ret > 0) {
req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
- io_kbuf_commit(req, bl, arg->out_len, ret);
+ if (!io_kbuf_commit(req, bl, arg->out_len, ret))
+ req->flags |= REQ_F_BUF_MORE;
}
} else {
ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index a3ad8aea45c8..903800b20ff3 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -165,7 +165,9 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
ret = io_kbuf_commit(req, bl, len, nr);
req->buf_index = bl->bgid;
}
- req->flags &= ~REQ_F_BUFFER_RING;
+ if (ret && (req->flags & REQ_F_BUF_MORE))
+ ret = false;
+ req->flags &= ~(REQ_F_BUFFER_RING | REQ_F_BUF_MORE);
return ret;
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index f927844c8ada..e5d0cc8a8a56 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -338,16 +338,19 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
struct proto *prot = READ_ONCE(sk->sk_prot);
int ret, arg = 0;
- if (!prot || !prot->ioctl)
- return -EOPNOTSUPP;
-
switch (cmd->cmd_op) {
case SOCKET_URING_OP_SIOCINQ:
+ if (!prot || !prot->ioctl)
+ return -EOPNOTSUPP;
+
ret = prot->ioctl(sk, SIOCINQ, &arg);
if (ret)
return ret;
return arg;
case SOCKET_URING_OP_SIOCOUTQ:
+ if (!prot || !prot->ioctl)
+ return -EOPNOTSUPP;
+
ret = prot->ioctl(sk, SIOCOUTQ, &arg);
if (ret)
return ret;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index cb756ee15b6f..046f671532b0 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2530,6 +2530,7 @@ static void cgroup_migrate_add_task(struct task_struct *task,
mgctx->tset.nr_tasks++;
+ css_set_skip_task_iters(cset, task);
list_move_tail(&task->cg_list, &cset->mg_tasks);
if (list_empty(&cset->mg_node))
list_add_tail(&cset->mg_node,
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index e3c8d9900ca7..1ff26dc3bdb0 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1493,6 +1493,12 @@ static const struct vm_special_mapping xol_mapping = {
.fault = xol_fault,
};
+unsigned long __weak arch_uprobe_get_xol_area(void)
+{
+ /* Try to map as high as possible, this is only a hint. */
+ return get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
+}
+
/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
@@ -1508,9 +1514,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
}
if (!area->vaddr) {
- /* Try to map as high as possible, this is only a hint. */
- area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
- PAGE_SIZE, 0, 0);
+ area->vaddr = arch_uprobe_get_xol_area();
if (IS_ERR_VALUE(area->vaddr)) {
ret = area->vaddr;
goto fail;
diff --git a/kernel/fork.c b/kernel/fork.c
index e5ec098a6f61..55086df4d24c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -3248,7 +3248,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
- if (fs->users == 1)
+ if (!(unshare_flags & CLONE_NEWNS) && fs->users == 1)
return 0;
*new_fsp = copy_fs_struct(fs);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 529adb1f5859..875f25ed6f71 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -141,6 +141,12 @@ config GENERIC_IRQ_DEBUGFS
If you don't know what to do here, say N.
+# Clear forwarded VM interrupts during kexec.
+# This option ensures the kernel clears active states for interrupts
+# forwarded to virtual machines (VMs) during a machine kexec.
+config GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD
+ bool
+
endmenu
config GENERIC_IRQ_MULTI_HANDLER
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index f19d3080bf11..c0f44c06d69d 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
+obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o kexec.o
obj-$(CONFIG_IRQ_TIMINGS) += timings.o
ifeq ($(CONFIG_TEST_IRQ_TIMINGS),y)
CFLAGS_timings.o += -DDEBUG
diff --git a/kernel/irq/kexec.c b/kernel/irq/kexec.c
new file mode 100644
index 000000000000..0f9548c1708d
--- /dev/null
+++ b/kernel/irq/kexec.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqnr.h>
+
+#include "internals.h"
+
+void machine_kexec_mask_interrupts(void)
+{
+ struct irq_desc *desc;
+ unsigned int i;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+ int check_eoi = 1;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD)) {
+ /*
+ * First try to remove the active state from an interrupt which is forwarded
+ * to a VM. If the interrupt is not forwarded, try to EOI the interrupt.
+ */
+ check_eoi = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+ }
+
+ if (check_eoi && chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
+}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da59c68df841..00ba56a27b66 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1077,25 +1077,23 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
lockdep_assert_held(&kprobe_mutex);
ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
- if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
+ if (ret < 0)
return ret;
if (*cnt == 0) {
ret = register_ftrace_function(ops);
- if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
- goto err_ftrace;
+ if (ret < 0) {
+ /*
+ * At this point, since ops is not registered, we should be safe from
+ * registering empty filter.
+ */
+ ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
+ return ret;
+ }
}
(*cnt)++;
return ret;
-
-err_ftrace:
- /*
- * At this point, sinec ops is not registered, we should be sefe from
- * registering empty filter.
- */
- ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
- return ret;
}
static int arm_kprobe_ftrace(struct kprobe *p)
@@ -1113,6 +1111,10 @@ static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int ret;
lockdep_assert_held(&kprobe_mutex);
+ if (unlikely(kprobe_ftrace_disabled)) {
+ /* Now ftrace is disabled forever, disarm is already done. */
+ return 0;
+ }
if (*cnt == 1) {
ret = unregister_ftrace_function(ops);
@@ -1452,7 +1454,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
unsigned long offset, bool *on_func_entry)
{
if ((symbol_name && addr) || (!symbol_name && !addr))
- goto invalid;
+ return ERR_PTR(-EINVAL);
if (symbol_name) {
/*
@@ -1482,11 +1484,10 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
* at the start of the function.
*/
addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
- if (addr)
- return addr;
+ if (!addr)
+ return ERR_PTR(-EINVAL);
-invalid:
- return ERR_PTR(-EINVAL);
+ return addr;
}
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
@@ -1509,15 +1510,15 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
if (unlikely(!ap))
return NULL;
- if (p != ap) {
- list_for_each_entry(list_p, &ap->list, list)
- if (list_p == p)
- /* kprobe p is a valid probe */
- goto valid;
- return NULL;
- }
-valid:
- return ap;
+ if (p == ap)
+ return ap;
+
+ list_for_each_entry(list_p, &ap->list, list)
+ if (list_p == p)
+ /* kprobe p is a valid probe */
+ return ap;
+
+ return NULL;
}
/*
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7e79f39c7bcf..545f197d330c 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4319,7 +4319,6 @@ static int scx_cgroup_init(void)
ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, cgroup_init,
css->cgroup, &args);
if (ret) {
- css_put(css);
scx_ops_error("ops.cgroup_init() failed (%d)", ret);
return ret;
}
@@ -4775,14 +4774,29 @@ static void schedule_scx_ops_disable_work(void)
kthread_queue_work(helper, &scx_ops_disable_work);
}
-static void scx_ops_disable(enum scx_exit_kind kind)
+/*
+ * Claim the exit. The caller must ensure that the helper kthread work
+ * is kicked before the current task can be preempted. Once exit_kind is
+ * claimed, scx_error() can no longer trigger, so if the current task gets
+ * preempted and the BPF scheduler fails to schedule it back, the helper work
+ * will never be kicked and the whole system can wedge.
+ */
+static bool scx_claim_exit(enum scx_exit_kind kind)
{
int none = SCX_EXIT_NONE;
+ lockdep_assert_preemption_disabled();
+
+ return atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
+}
+
+static void scx_ops_disable(enum scx_exit_kind kind)
+{
if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
kind = SCX_EXIT_ERROR;
- atomic_try_cmpxchg(&scx_exit_kind, &none, kind);
+ guard(preempt)();
+ scx_claim_exit(kind);
schedule_scx_ops_disable_work();
}
@@ -5082,10 +5096,11 @@ static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
const char *fmt, ...)
{
struct scx_exit_info *ei = scx_exit_info;
- int none = SCX_EXIT_NONE;
va_list args;
- if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
+ guard(preempt)();
+
+ if (!scx_claim_exit(kind))
return;
ei->exit_code = exit_code;
@@ -5150,19 +5165,29 @@ static int validate_ops(const struct sched_ext_ops *ops)
return 0;
}
-static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+/*
+ * scx_ops_enable() is offloaded to a dedicated system-wide RT kthread to avoid
+ * starvation. During the READY -> ENABLED task switching loop, the calling
+ * thread's sched_class gets switched from fair to ext. As fair has higher
+ * priority than ext, the calling thread can be indefinitely starved under
+ * fair-class saturation, leading to a system hang.
+ */
+struct scx_enable_cmd {
+ struct kthread_work work;
+ struct sched_ext_ops *ops;
+ int ret;
+};
+
+static void scx_ops_enable_workfn(struct kthread_work *work)
{
+ struct scx_enable_cmd *cmd =
+ container_of(work, struct scx_enable_cmd, work);
+ struct sched_ext_ops *ops = cmd->ops;
struct scx_task_iter sti;
struct task_struct *p;
unsigned long timeout;
int i, cpu, node, ret;
- if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
- cpu_possible_mask)) {
- pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
- return -EINVAL;
- }
-
mutex_lock(&scx_ops_enable_mutex);
if (!scx_ops_helper) {
@@ -5429,7 +5454,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
atomic_long_inc(&scx_enable_seq);
- return 0;
+ cmd->ret = 0;
+ return;
err_del:
kobject_del(scx_root_kobj);
@@ -5442,7 +5468,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
}
err_unlock:
mutex_unlock(&scx_ops_enable_mutex);
- return ret;
+ cmd->ret = ret;
+ return;
err_disable_unlock_all:
scx_cgroup_unlock();
@@ -5461,7 +5488,39 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
*/
scx_ops_error("scx_ops_enable() failed (%d)", ret);
kthread_flush_work(&scx_ops_disable_work);
- return 0;
+ cmd->ret = 0;
+}
+
+static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+{
+ static struct kthread_worker *helper;
+ static DEFINE_MUTEX(helper_mutex);
+ struct scx_enable_cmd cmd;
+
+ if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
+ cpu_possible_mask)) {
+ pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
+ return -EINVAL;
+ }
+
+ if (!READ_ONCE(helper)) {
+ mutex_lock(&helper_mutex);
+ if (!helper) {
+ helper = scx_create_rt_helper("scx_ops_enable_helper");
+ if (!helper) {
+ mutex_unlock(&helper_mutex);
+ return -ENOMEM;
+ }
+ }
+ mutex_unlock(&helper_mutex);
+ }
+
+ kthread_init_work(&cmd.work, scx_ops_enable_workfn);
+ cmd.ops = ops;
+
+ kthread_queue_work(READ_ONCE(helper), &cmd.work);
+ kthread_flush_work(&cmd.work);
+ return cmd.ret;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6efb1dfcd943..efd3cbefb5a2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -551,6 +551,21 @@ static inline bool entity_before(const struct sched_entity *a,
return (s64)(a->deadline - b->deadline) < 0;
}
+/*
+ * Per avg_vruntime() below, cfs_rq::zero_vruntime is only slightly stale
+ * and this value should be no more than two lag bounds. Which puts it in the
+ * general order of:
+ *
+ * (slice + TICK_NSEC) << NICE_0_LOAD_SHIFT
+ *
+ * which is around 44 bits in size (on 64bit); that is 20 for
+ * NICE_0_LOAD_SHIFT, another 20 for NSEC_PER_MSEC and then a handful for
+ * however many msec the actual slice+tick ends up being.
+ *
+ * (disregarding the actual divide-by-weight part makes for the worst case
+ * weight of 2, which nicely cancels vs the fuzz in zero_vruntime not actually
+ * being the zero-lag point).
+ */
static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return (s64)(se->vruntime - cfs_rq->zero_vruntime);
@@ -638,39 +653,61 @@ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
static inline
-void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+void update_zero_vruntime(struct cfs_rq *cfs_rq, s64 delta)
{
/*
- * v' = v + d ==> avg_vruntime' = avg_runtime - d*avg_load
+ * v' = v + d ==> avg_vruntime' = avg_vruntime - d*avg_load
*/
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
+ cfs_rq->zero_vruntime += delta;
}
/*
- * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
+ * Specifically: avg_vruntime() + 0 must result in entity_eligible() := true
* For this to be so, the result of this function must have a left bias.
+ *
+ * Called in:
+ * - place_entity() -- before enqueue
+ * - update_entity_lag() -- before dequeue
+ * - entity_tick()
+ *
+ * This means it is one entry 'behind' but that puts it close enough to where
+ * the bound on entity_key() is at most two lag bounds.
*/
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
- s64 avg = cfs_rq->avg_vruntime;
- long load = cfs_rq->avg_load;
+ long weight = cfs_rq->avg_load;
+ s64 delta = 0;
- if (curr && curr->on_rq) {
- unsigned long weight = scale_load_down(curr->load.weight);
+ if (curr && !curr->on_rq)
+ curr = NULL;
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
- }
+ if (weight) {
+ s64 runtime = cfs_rq->avg_vruntime;
+
+ if (curr) {
+ unsigned long w = scale_load_down(curr->load.weight);
+
+ runtime += entity_key(cfs_rq, curr) * w;
+ weight += w;
+ }
- if (load) {
/* sign flips effective floor / ceiling */
- if (avg < 0)
- avg -= (load - 1);
- avg = div_s64(avg, load);
+ if (runtime < 0)
+ runtime -= (weight - 1);
+
+ delta = div_s64(runtime, weight);
+ } else if (curr) {
+ /*
+ * When there is but one element, it is the average.
+ */
+ delta = curr->vruntime - cfs_rq->zero_vruntime;
}
- return cfs_rq->zero_vruntime + avg;
+ update_zero_vruntime(cfs_rq, delta);
+
+ return cfs_rq->zero_vruntime;
}
/*
@@ -744,16 +781,6 @@ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
return vruntime_eligible(cfs_rq, se->vruntime);
}
-static void update_zero_vruntime(struct cfs_rq *cfs_rq)
-{
- u64 vruntime = avg_vruntime(cfs_rq);
- s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
-
- avg_vruntime_update(cfs_rq, delta);
-
- cfs_rq->zero_vruntime = vruntime;
-}
-
static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
{
struct sched_entity *root = __pick_root_entity(cfs_rq);
@@ -824,7 +851,6 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
avg_vruntime_add(cfs_rq, se);
- update_zero_vruntime(cfs_rq);
se->min_vruntime = se->vruntime;
se->min_slice = se->slice;
rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
@@ -836,7 +862,6 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
&min_vruntime_cb);
avg_vruntime_sub(cfs_rq, se);
- update_zero_vruntime(cfs_rq);
}
struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
@@ -5700,6 +5725,11 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
update_load_avg(cfs_rq, curr, UPDATE_TG);
update_cfs_group(curr);
+ /*
+ * Pulls along cfs_rq::zero_vruntime.
+ */
+ avg_vruntime(cfs_rq);
+
#ifdef CONFIG_SCHED_HRTICK
/*
* queued ticks are scheduled to match the slice, so don't bother
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 624ef809f671..1f0f0d9a5a5c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -155,6 +155,14 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return cpuidle_enter(drv, dev, next_state);
}
+static void idle_call_stop_or_retain_tick(bool stop_tick)
+{
+ if (stop_tick || tick_nohz_tick_stopped())
+ tick_nohz_idle_stop_tick();
+ else
+ tick_nohz_idle_retain_tick();
+}
+
/**
* cpuidle_idle_call - the main idle function
*
@@ -164,7 +172,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* set, and it returns with polling set. If it ever stops polling, it
* must clear the polling bit.
*/
-static void cpuidle_idle_call(void)
+static void cpuidle_idle_call(bool stop_tick)
{
struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
@@ -180,7 +188,7 @@ static void cpuidle_idle_call(void)
}
if (cpuidle_not_available(drv, dev)) {
- tick_nohz_idle_stop_tick();
+ idle_call_stop_or_retain_tick(stop_tick);
default_idle_call();
goto exit_idle;
@@ -214,24 +222,35 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
- } else {
- bool stop_tick = true;
+ } else if (drv->state_count > 1) {
+ /*
+ * stop_tick is expected to be true by default by cpuidle
+ * governors, which allows them to select idle states with
+ * target residency above the tick period length.
+ */
+ stop_tick = true;
/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
next_state = cpuidle_select(drv, dev, &stop_tick);
- if (stop_tick || tick_nohz_tick_stopped())
- tick_nohz_idle_stop_tick();
- else
- tick_nohz_idle_retain_tick();
+ idle_call_stop_or_retain_tick(stop_tick);
entered_state = call_cpuidle(drv, dev, next_state);
/*
* Give the governor an opportunity to reflect on the outcome
*/
cpuidle_reflect(dev, entered_state);
+ } else {
+ idle_call_stop_or_retain_tick(stop_tick);
+
+ /*
+ * If there is only a single idle state (or none), there is
+ * nothing meaningful for the governor to choose. Skip the
+ * governor and always use state 0.
+ */
+ call_cpuidle(drv, dev, 0);
}
exit_idle:
@@ -252,6 +271,7 @@ static void cpuidle_idle_call(void)
static void do_idle(void)
{
int cpu = smp_processor_id();
+ bool got_tick = false;
/*
* Check if we need to update blocked load
@@ -323,8 +343,9 @@ static void do_idle(void)
tick_nohz_idle_restart_tick();
cpu_idle_poll();
} else {
- cpuidle_idle_call();
+ cpuidle_idle_call(got_tick);
}
+ got_tick = tick_nohz_idle_got_tick();
arch_cpu_idle_exit();
}
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 1ad88e97b4eb..da7e8a02a096 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -702,7 +702,7 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
*
* Return: jiffies_64 value converted to 64-bit "clock_t" (CLOCKS_PER_SEC)
*/
-u64 jiffies_64_to_clock_t(u64 x)
+notrace u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6958ae79464d..ccfaae8795f0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1863,7 +1863,7 @@ static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
entries += ret;
entry_bytes += local_read(&head_page->page->commit);
- local_set(&cpu_buffer->head_page->entries, ret);
+ local_set(&head_page->entries, ret);
if (head_page == cpu_buffer->commit_page)
break;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a543bb9d86b8..9e5aa468d7e4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2944,6 +2944,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
struct ftrace_stack *fstack;
struct stack_entry *entry;
int stackidx;
+ int bit;
+
+ bit = trace_test_and_set_recursion(_THIS_IP_, _RET_IP_, TRACE_EVENT_START);
+ if (bit < 0)
+ return;
/*
* Add one, for this function and the call to save_stack_trace()
@@ -3015,6 +3020,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
__this_cpu_dec(ftrace_stack_reserve);
preempt_enable_notrace();
+ trace_clear_recursion(bit);
}
static inline void ftrace_trace_stack(struct trace_array *tr,
@@ -9227,7 +9233,7 @@ static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
static int
-allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
+allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, unsigned long size)
{
enum ring_buffer_flags rb_flags;
@@ -9277,7 +9283,7 @@ static void free_trace_buffer(struct array_buffer *buf)
}
}
-static int allocate_trace_buffers(struct trace_array *tr, int size)
+static int allocate_trace_buffers(struct trace_array *tr, unsigned long size)
{
int ret;
@@ -10479,7 +10485,7 @@ __init static void enable_instances(void)
__init static int tracer_alloc_buffers(void)
{
- int ring_buf_size;
+ unsigned long ring_buf_size;
int ret = -ENOMEM;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 38218cfe20e9..d07b8062ded3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -3999,7 +3999,11 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
- strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
+ if (bootup_event_buf[0] != '\0')
+ strlcat(bootup_event_buf, ",", COMMAND_LINE_SIZE);
+
+ strlcat(bootup_event_buf, str, COMMAND_LINE_SIZE);
+
trace_set_ring_buffer_expanded(NULL);
disable_tracing_selftest("running event tracing");
@@ -4173,26 +4177,22 @@ static __init int event_trace_memsetup(void)
return 0;
}
-__init void
-early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
+/*
+ * Helper function to enable or disable a comma-separated list of events
+ * from the bootup buffer.
+ */
+static __init void __early_set_events(struct trace_array *tr, char *buf, bool enable)
{
char *token;
- int ret;
-
- while (true) {
- token = strsep(&buf, ",");
-
- if (!token)
- break;
+ while ((token = strsep(&buf, ","))) {
if (*token) {
- /* Restarting syscalls requires that we stop them first */
- if (disable_first)
+ if (enable) {
+ if (ftrace_set_clr_event(tr, token, 1))
+ pr_warn("Failed to enable trace event: %s\n", token);
+ } else {
ftrace_set_clr_event(tr, token, 0);
-
- ret = ftrace_set_clr_event(tr, token, 1);
- if (ret)
- pr_warn("Failed to enable trace event: %s\n", token);
+ }
}
/* Put back the comma to allow this to be called again */
@@ -4201,6 +4201,32 @@ early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
}
}
+/**
+ * early_enable_events - enable events from the bootup buffer
+ * @tr: The trace array to enable the events in
+ * @buf: The buffer containing the comma separated list of events
+ * @disable_first: If true, disable all events in @buf before enabling them
+ *
+ * This function enables events from the bootup buffer. If @disable_first
+ * is true, it will first disable all events in the buffer before enabling
+ * them.
+ *
+ * For syscall events, which rely on a global refcount to register the
+ * SYSCALL_WORK_SYSCALL_TRACEPOINT flag (especially for pid 1), we must
+ * ensure the refcount hits zero before re-enabling them. A simple
+ * "disable then enable" per-event is not enough if multiple syscalls are
+ * used, as the refcount will stay above zero. Thus, we need a two-phase
+ * approach: disable all, then enable all.
+ */
+__init void
+early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
+{
+ if (disable_first)
+ __early_set_events(tr, buf, false);
+
+ __early_set_events(tr, buf, true);
+}
+
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 655246a9bec3..47ea114ca9f4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -271,10 +271,12 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops)
{
+ unsigned long *task_var = fgraph_get_task_var(gops);
+
ftrace_graph_addr_finish(gops, trace);
- if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
- trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
+ if (*task_var & TRACE_GRAPH_NOTRACE) {
+ *task_var &= ~TRACE_GRAPH_NOTRACE;
return;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3840d7ce9cda..b2fdb8719a74 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -6209,7 +6209,7 @@ static void pr_cont_worker_id(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- if (pool->flags & WQ_BH)
+ if (pool->flags & POOL_BH)
pr_cont("bh%s",
pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : "");
else
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 81f29c29f47b..5d3802eba52a 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -316,7 +316,7 @@ int __init xbc_node_compose_key_after(struct xbc_node *root,
depth ? "." : "");
if (ret < 0)
return ret;
- if (ret > size) {
+ if (ret >= size) {
size = 0;
} else {
size -= ret;
@@ -532,9 +532,9 @@ static char *skip_spaces_until_newline(char *p)
static int __init __xbc_open_brace(char *p)
{
/* Push the last key as open brace */
- open_brace[brace_index++] = xbc_node_index(last_parent);
if (brace_index >= XBC_DEPTH_MAX)
return xbc_parse_error("Exceed max depth of braces", p);
+ open_brace[brace_index++] = xbc_node_index(last_parent);
return 0;
}
@@ -712,7 +712,8 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (op == ':') {
unsigned short nidx = child->next;
- xbc_init_node(child, v, XBC_VALUE);
+ if (xbc_init_node(child, v, XBC_VALUE) < 0)
+ return xbc_parse_error("Failed to override value", v);
child->next = nidx; /* keep subkeys */
goto array;
}
@@ -791,7 +792,7 @@ static int __init xbc_verify_tree(void)
/* Brace closing */
if (brace_index) {
- n = &xbc_nodes[open_brace[brace_index]];
+ n = &xbc_nodes[open_brace[brace_index - 1]];
return xbc_parse_error("Brace is not closed",
xbc_node_get_data(n));
}
diff --git a/mm/compaction.c b/mm/compaction.c
index eb5474dea04d..66032064387e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -83,6 +83,7 @@ static inline bool is_via_compact_memory(int order) { return false; }
static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
{
post_alloc_hook(page, order, __GFP_MOVABLE);
+ set_page_refcounted(page);
return page;
}
#define mark_allocated(...) alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
@@ -1869,6 +1870,7 @@ static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long da
dst = (struct folio *)freepage;
post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
+ set_page_refcounted(&dst->page);
if (order)
prep_compound_page(&dst->page, order);
cc->nr_freepages -= 1 << order;
diff --git a/mm/internal.h b/mm/internal.h
index 9e0577413087..b7b942767c70 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -729,8 +729,7 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
extern void prep_compound_page(struct page *page, unsigned int order);
-extern void post_alloc_hook(struct page *page, unsigned int order,
- gfp_t gfp_flags);
+void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);
extern int user_min_free_kbytes;
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index b301ca337508..0ed501694a5a 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -13,6 +13,7 @@
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
+#include <linux/kasan-enabled.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
@@ -880,6 +881,20 @@ void __init kfence_alloc_pool_and_metadata(void)
if (!kfence_sample_interval)
return;
+ /*
+ * If KASAN hardware tags are enabled, disable KFENCE, because it
+ * does not support MTE yet.
+ */
+ if (kasan_hw_tags_enabled()) {
+ pr_info("disabled as KASAN HW tags are enabled\n");
+ if (__kfence_pool) {
+ memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
+ __kfence_pool = NULL;
+ }
+ kfence_sample_interval = 0;
+ return;
+ }
+
/*
* If the pool has already been initialized by arch, there is no need to
* re-allocate the memory pool.
@@ -949,14 +964,14 @@ static int kfence_init_late(void)
#ifdef CONFIG_CONTIG_ALLOC
struct page *pages;
- pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
- NULL);
+ pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL | __GFP_SKIP_KASAN,
+ first_online_node, NULL);
if (!pages)
return -ENOMEM;
__kfence_pool = page_to_virt(pages);
- pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
- NULL);
+ pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL | __GFP_SKIP_KASAN,
+ first_online_node, NULL);
if (pages)
kfence_metadata_init = page_to_virt(pages);
#else
@@ -966,11 +981,13 @@ static int kfence_init_late(void)
return -EINVAL;
}
- __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+ __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE,
+ GFP_KERNEL | __GFP_SKIP_KASAN);
if (!__kfence_pool)
return -ENOMEM;
- kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
+ kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE,
+ GFP_KERNEL | __GFP_SKIP_KASAN);
#endif
if (!kfence_metadata_init)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4282c9d0a5dd..b1a8abe5005e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1542,7 +1542,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
int i;
set_page_private(page, 0);
- set_page_refcounted(page);
arch_alloc_page(page, order);
debug_pagealloc_map_pages(page, 1 << order);
@@ -1598,6 +1597,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
unsigned int alloc_flags)
{
post_alloc_hook(page, order, gfp_flags);
+ set_page_refcounted(page);
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
@@ -6509,7 +6509,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
int ret = 0;
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
- .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .gfp_mask = cc->gfp_mask,
.reason = MR_CONTIG_RANGE,
};
struct page *page;
@@ -6579,7 +6579,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
return (ret < 0) ? ret : 0;
}
-static void split_free_pages(struct list_head *list)
+static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
{
int order;
@@ -6590,7 +6590,8 @@ static void split_free_pages(struct list_head *list)
list_for_each_entry_safe(page, next, &list[order], lru) {
int i;
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ post_alloc_hook(page, order, gfp_mask);
+ set_page_refcounted(page);
if (!order)
continue;
@@ -6604,6 +6605,41 @@ static void split_free_pages(struct list_head *list)
}
}
+static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
+{
+ const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
+ const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO |
+ __GFP_SKIP_KASAN;
+ const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+
+ /*
+ * We are given the range to allocate; node, mobility and placement
+ * hints are irrelevant at this point. We'll simply ignore them.
+ */
+ gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
+ __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
+
+ /*
+ * We only support most reclaim flags (but not NOFAIL/NORETRY), and
+ * selected action flags.
+ */
+ if (gfp_mask & ~(reclaim_mask | action_mask))
+ return -EINVAL;
+
+ /*
+ * Flags to control page compaction/migration/reclaim, to free up our
+ * page range. Migratable pages are movable, __GFP_MOVABLE is implied
+ * for them.
+ *
+ * Traditionally we always had __GFP_HARDWALL|__GFP_RETRY_MAYFAIL set,
+ * keep doing that to not degrade callers.
+ */
+ *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
+ __GFP_HARDWALL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+ return 0;
+}
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -6612,7 +6648,9 @@ static void split_free_pages(struct list_head *list)
* #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
* in range must have the same migratetype and it must
* be either of the two.
- * @gfp_mask: GFP mask to use during compaction
+ * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
+ * action and reclaim modifiers are supported. Reclaim modifiers
+ * control allocation behavior during compaction/migration/reclaim.
*
* The PFN range does not have to be pageblock aligned. The PFN range must
* belong to a single zone.
@@ -6638,11 +6676,14 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
- .gfp_mask = current_gfp_context(gfp_mask),
.alloc_contig = true,
};
INIT_LIST_HEAD(&cc.migratepages);
+ gfp_mask = current_gfp_context(gfp_mask);
+ if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
+ return -EINVAL;
+
/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblock and max order pages may
@@ -6717,7 +6758,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
}
if (!(gfp_mask & __GFP_COMP)) {
- split_free_pages(cc.freepages);
+ split_free_pages(cc.freepages, gfp_mask);
/* Free head and tail (if any) */
if (start != outer_start)
@@ -6784,7 +6825,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
/**
* alloc_contig_pages() -- tries to find and allocate contiguous range of pages
* @nr_pages: Number of contiguous pages to allocate
- * @gfp_mask: GFP mask to limit search and used during compaction
+ * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
+ * action and reclaim modifiers are supported. Reclaim modifiers
+ * control allocation behavior during compaction/migration/reclaim.
* @nid: Target node
* @nodemask: Mask for other possible nodes
*
diff --git a/mm/shmem.c b/mm/shmem.c
index 5e8184821fac..c92af39eebdd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -499,15 +499,27 @@ static int shmem_replace_entry(struct address_space *mapping,
/*
* Sometimes, before we decide whether to proceed or to fail, we must check
- * that an entry was not already brought back from swap by a racing thread.
+ * that an entry was not already brought back or split by a racing thread.
*
* Checking folio is not enough: by the time a swapcache folio is locked, it
* might be reused, and again be swapcache, using the same swap as before.
+ * Returns the swap entry's order if it is still present, else returns -1.
*/
-static bool shmem_confirm_swap(struct address_space *mapping,
- pgoff_t index, swp_entry_t swap)
+static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,
+ swp_entry_t swap)
{
- return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
+ XA_STATE(xas, &mapping->i_pages, index);
+ int ret = -1;
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ entry = xas_load(&xas);
+ if (entry == swp_to_radix_entry(swap))
+ ret = xas_get_order(&xas);
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+ return ret;
}
/*
@@ -794,7 +806,9 @@ static int shmem_add_to_page_cache(struct folio *folio,
pgoff_t index, void *expected, gfp_t gfp)
{
XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
- long nr = folio_nr_pages(folio);
+ unsigned long nr = folio_nr_pages(folio);
+ swp_entry_t iter, swap;
+ void *entry;
VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -806,14 +820,25 @@ static int shmem_add_to_page_cache(struct folio *folio,
gfp &= GFP_RECLAIM_MASK;
folio_throttle_swaprate(folio, gfp);
+ swap = radix_to_swp_entry(expected);
do {
+ iter = swap;
xas_lock_irq(&xas);
- if (expected != xas_find_conflict(&xas)) {
- xas_set_err(&xas, -EEXIST);
- goto unlock;
+ xas_for_each_conflict(&xas, entry) {
+ /*
+ * The range must either be empty, or filled with
+ * expected swap entries. Shmem swap entries are never
+ * partially freed without split of both entry and
+ * folio, so there shouldn't be any holes.
+ */
+ if (!expected || entry != swp_to_radix_entry(iter)) {
+ xas_set_err(&xas, -EEXIST);
+ goto unlock;
+ }
+ iter.val += 1 << xas_get_order(&xas);
}
- if (expected && xas_find_conflict(&xas)) {
+ if (expected && iter.val - nr != swap.val) {
xas_set_err(&xas, -EEXIST);
goto unlock;
}
@@ -2132,7 +2157,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct swap_info_struct *si;
struct folio *folio = NULL;
swp_entry_t swap;
- int error, nr_pages;
+ int error, nr_pages, order, split_order;
VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
swap = radix_to_swp_entry(*foliop);
@@ -2142,17 +2167,21 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
return -EIO;
si = get_swap_device(swap);
- if (!si) {
- if (!shmem_confirm_swap(mapping, index, swap))
+ order = shmem_confirm_swap(mapping, index, swap);
+ if (unlikely(!si)) {
+ if (order < 0)
return -EEXIST;
else
return -EINVAL;
}
+ if (unlikely(order < 0)) {
+ put_swap_device(si);
+ return -EEXIST;
+ }
/* Look it up and read it in.. */
folio = swap_cache_get_folio(swap, NULL, 0);
if (!folio) {
- int split_order;
/* Or update major stats only when swapin succeeds?? */
if (fault_type) {
@@ -2189,13 +2218,47 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
error = -ENOMEM;
goto failed;
}
+ } else if (order > folio_order(folio)) {
+ /*
+ * Swap readahead may swap in order 0 folios into swapcache
+ * asynchronously, while the shmem mapping can still store
+ * large swap entries. In such cases, we should split the
+ * large swap entry to prevent possible data corruption.
+ */
+ split_order = shmem_split_large_entry(inode, index, swap, gfp);
+ if (split_order < 0) {
+ folio_put(folio);
+ folio = NULL;
+ error = split_order;
+ goto failed;
+ }
+
+ /*
+ * If the large swap entry has already been split, it is
+ * necessary to recalculate the new swap entry based on
+ * the old order alignment.
+ */
+ if (split_order > 0) {
+ pgoff_t offset = index - round_down(index, 1 << split_order);
+
+ swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ }
+ } else if (order < folio_order(folio)) {
+ swap.val = round_down(swap.val, 1 << folio_order(folio));
+ index = round_down(index, 1 << folio_order(folio));
}
- /* We have to do this with folio locked to prevent races */
+ /*
+ * We have to do this with the folio locked to prevent races.
+ * The shmem_confirm_swap below only checks if the first swap
+ * entry matches the folio, that's enough to ensure the folio
+ * is not used outside of shmem, as shmem swap entries
+ * and swap cache folios are never partially freed.
+ */
folio_lock(folio);
if (!folio_test_swapcache(folio) ||
- folio->swap.val != swap.val ||
- !shmem_confirm_swap(mapping, index, swap)) {
+ shmem_confirm_swap(mapping, index, swap) < 0 ||
+ folio->swap.val != swap.val) {
error = -EEXIST;
goto unlock;
}
@@ -2237,7 +2300,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
*foliop = folio;
return 0;
failed:
- if (!shmem_confirm_swap(mapping, index, swap))
+ if (shmem_confirm_swap(mapping, index, swap) < 0)
error = -EEXIST;
if (error == -EIO)
shmem_set_folio_swapin_error(inode, index, folio, swap);
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 209180b4c268..c31edbd7c2ab 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -464,6 +464,9 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
!time_after_eq(aggregation_end_time, forw_packet->send_time))
return false;
+ if (skb_tailroom(forw_packet->skb) < packet_len)
+ return false;
+
if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
return false;
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index b065578b4436..44deb3ac7712 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -112,7 +112,15 @@ static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh,
/* unsupported WiFi driver version */
goto default_throughput;
- real_netdev = batadv_get_real_netdev(hard_iface->net_dev);
+ /* only use rtnl_trylock because the elp worker will be cancelled while
+ * the rtnl_lock is held. The cancel_delayed_work_sync() would otherwise
+ * wait forever when the elp work_item was started and it is then also
+ * trying to rtnl_lock
+ */
+ if (!rtnl_trylock())
+ return false;
+ real_netdev = __batadv_get_real_netdev(hard_iface->net_dev);
+ rtnl_unlock();
if (!real_netdev)
goto default_throughput;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 96a412beab2d..c142e629e4d7 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -203,7 +203,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
}
/**
- * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
+ * __batadv_get_real_netdev() - check if the given netdev struct is a virtual
* interface on top of another 'real' interface
* @netdev: the device to check
*
@@ -213,7 +213,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
* Return: the 'real' net device or the original net device and NULL in case
* of an error.
*/
-static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
+struct net_device *__batadv_get_real_netdev(struct net_device *netdev)
{
struct batadv_hard_iface *hard_iface = NULL;
struct net_device *real_netdev = NULL;
@@ -266,7 +266,7 @@ struct net_device *batadv_get_real_netdev(struct net_device *net_device)
struct net_device *real_netdev;
rtnl_lock();
- real_netdev = batadv_get_real_netdevice(net_device);
+ real_netdev = __batadv_get_real_netdev(net_device);
rtnl_unlock();
return real_netdev;
@@ -335,7 +335,7 @@ static u32 batadv_wifi_flags_evaluate(struct net_device *net_device)
if (batadv_is_cfg80211_netdev(net_device))
wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT;
- real_netdev = batadv_get_real_netdevice(net_device);
+ real_netdev = __batadv_get_real_netdev(net_device);
if (!real_netdev)
return wifi_flags;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 64f660dbbe54..c7c2f17e6a46 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -68,6 +68,7 @@ enum batadv_hard_if_bcast {
extern struct notifier_block batadv_hard_if_notifier;
+struct net_device *__batadv_get_real_netdev(struct net_device *net_device);
struct net_device *batadv_get_real_netdev(struct net_device *net_device);
bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface);
bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fa74fac5af77..447d29c67e7c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1868,6 +1868,8 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
return false;
done:
+ conn->iso_qos = *qos;
+
if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
UINT_PTR(qos->ucast.cig), NULL) < 0)
return false;
@@ -1934,8 +1936,6 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
}
hci_conn_hold(cis);
-
- cis->iso_qos = *qos;
cis->state = BT_BOUND;
return cis;
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 00de90fee44a..1656448649b9 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -6552,8 +6552,8 @@ static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
* state.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- hci_scan_disable_sync(hdev);
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
+ hci_scan_disable_sync(hdev);
}
/* Update random address, but set require_privacy to false so
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 707f229f896a..40a6f1e20bab 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -986,7 +986,8 @@ static void session_free(struct kref *ref)
skb_queue_purge(&session->intr_transmit);
fput(session->intr_sock->file);
fput(session->ctrl_sock->file);
- l2cap_conn_put(session->conn);
+ if (session->conn)
+ l2cap_conn_put(session->conn);
kfree(session);
}
@@ -1164,6 +1165,15 @@ static void hidp_session_remove(struct l2cap_conn *conn,
down_write(&hidp_session_sem);
+ /* Drop L2CAP reference immediately to indicate that
+ * l2cap_unregister_user() shall not be called as it is already
+ * considered removed.
+ */
+ if (session->conn) {
+ l2cap_conn_put(session->conn);
+ session->conn = NULL;
+ }
+
hidp_session_terminate(session);
cancel_work_sync(&session->dev_init);
@@ -1301,7 +1311,9 @@ static int hidp_session_thread(void *arg)
* Instead, this call has the same semantics as if user-space tried to
* delete the session.
*/
- l2cap_unregister_user(session->conn, &session->user);
+ if (session->conn)
+ l2cap_unregister_user(session->conn, &session->user);
+
hidp_session_put(session);
module_put_and_kthread_exit(0);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 61fd5a01fbca..7c131e4640b7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1686,17 +1686,15 @@ static void l2cap_info_timeout(struct work_struct *work)
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
- struct hci_dev *hdev = conn->hcon->hdev;
int ret;
/* We need to check whether l2cap_conn is registered. If it is not, we
- * must not register the l2cap_user. l2cap_conn_del() is unregisters
- * l2cap_conn objects, but doesn't provide its own locking. Instead, it
- * relies on the parent hci_conn object to be locked. This itself relies
- * on the hci_dev object to be locked. So we must lock the hci device
- * here, too. */
+ * must not register the l2cap_user. l2cap_conn_del() unregisters
+ * l2cap_conn objects under conn->lock, and we use the same lock here
+ * to protect access to conn->users and conn->hchan.
+ */
- hci_dev_lock(hdev);
+ mutex_lock(&conn->lock);
if (!list_empty(&user->list)) {
ret = -EINVAL;
@@ -1717,16 +1715,14 @@ int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
ret = 0;
out_unlock:
- hci_dev_unlock(hdev);
+ mutex_unlock(&conn->lock);
return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
- struct hci_dev *hdev = conn->hcon->hdev;
-
- hci_dev_lock(hdev);
+ mutex_lock(&conn->lock);
if (list_empty(&user->list))
goto out_unlock;
@@ -1735,7 +1731,7 @@ void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
user->remove(conn, user);
out_unlock:
- hci_dev_unlock(hdev);
+ mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL(l2cap_unregister_user);
@@ -4587,7 +4583,8 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
switch (type) {
case L2CAP_IT_FEAT_MASK:
- conn->feat_mask = get_unaligned_le32(rsp->data);
+ if (cmd_len >= sizeof(*rsp) + sizeof(u32))
+ conn->feat_mask = get_unaligned_le32(rsp->data);
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
struct l2cap_info_req req;
@@ -4606,7 +4603,8 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
break;
case L2CAP_IT_FIXED_CHAN:
- conn->remote_fixed_chan = rsp->data[0];
+ if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
+ conn->remote_fixed_chan = rsp->data[0];
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
@@ -5008,7 +5006,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
u16 mtu, mps;
__le16 psm;
u8 result, rsp_len = 0;
- int i, num_scid;
+ int i, num_scid = 0;
bool defer = false;
if (!enable_ecred)
@@ -5021,6 +5019,14 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
goto response;
}
+ /* Check if there are no pending channels with the same ident */
+ __l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer,
+ &num_scid);
+ if (num_scid) {
+ result = L2CAP_CR_LE_INVALID_PARAMS;
+ goto response;
+ }
+
cmd_len -= sizeof(*req);
num_scid = cmd_len / sizeof(u16);
@@ -5373,7 +5379,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
u8 *data)
{
struct l2cap_chan *chan, *tmp;
- struct l2cap_ecred_conn_rsp *rsp = (void *) data;
+ struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
u16 result;
if (cmd_len < sizeof(*rsp))
@@ -5381,7 +5387,7 @@ static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
result = __le16_to_cpu(rsp->result);
- BT_DBG("result 0x%4.4x", rsp->result);
+ BT_DBG("result 0x%4.4x", result);
if (!result)
return 0;
@@ -6609,8 +6615,10 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
return -ENOBUFS;
}
- if (chan->imtu < skb->len) {
- BT_ERR("Too big LE L2CAP PDU");
+ if (skb->len > chan->imtu) {
+ BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len,
+ chan->imtu);
+ l2cap_send_disconn_req(chan, ECONNRESET);
return -ENOBUFS;
}
@@ -6636,7 +6644,9 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
sdu_len, skb->len, chan->imtu);
if (sdu_len > chan->imtu) {
- BT_ERR("Too big LE L2CAP SDU length received");
+ BT_ERR("Too big LE L2CAP SDU length: len %u > %u",
+ skb->len, sdu_len);
+ l2cap_send_disconn_req(chan, ECONNRESET);
err = -EMSGSIZE;
goto failed;
}
@@ -6672,6 +6682,7 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
if (chan->sdu->len + skb->len > chan->sdu_len) {
BT_ERR("Too much LE L2CAP data received");
+ l2cap_send_disconn_req(chan, ECONNRESET);
err = -EINVAL;
goto failed;
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4894e6444900..b1df591a5380 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2172,10 +2172,7 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
sk = cmd->sk;
if (status) {
- mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
- status);
- mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
- cmd_status_rsp, &status);
+ mgmt_cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
goto done;
}
@@ -5354,7 +5351,7 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
mgmt_status(status), &rp, sizeof(rp));
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
bt_dev_dbg(hdev, "add monitor %d complete, status %d",
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3a33fd06e6a4..204c5fe3a8d0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2743,7 +2743,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) &&
!crypto_memneq(key, smp->local_pk, 64)) {
bt_dev_err(hdev, "Remote and local public keys are identical");
- return SMP_UNSPECIFIED;
+ return SMP_DHKEY_CHECK_FAILED;
}
memcpy(smp->remote_pk, key, 64);
diff --git a/net/bridge/br_cfm.c b/net/bridge/br_cfm.c
index a3c755d0a09d..ffa571e38c54 100644
--- a/net/bridge/br_cfm.c
+++ b/net/bridge/br_cfm.c
@@ -576,7 +576,7 @@ static void mep_delete_implementation(struct net_bridge *br,
/* Empty and free peer MEP list */
hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) {
- cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
+ disable_delayed_work_sync(&peer_mep->ccm_rx_dwork);
hlist_del_rcu(&peer_mep->head);
kfree_rcu(peer_mep, rcu);
}
@@ -732,7 +732,7 @@ int br_cfm_cc_peer_mep_remove(struct net_bridge *br, const u32 instance,
return -ENOENT;
}
- cc_peer_disable(peer_mep);
+ disable_delayed_work_sync(&peer_mep->ccm_rx_dwork);
hlist_del_rcu(&peer_mep->head);
kfree_rcu(peer_mep, rcu);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index d38c9eadbe2f..0d75679c6a7e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -205,9 +205,9 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
s32 result;
u64 global_id;
void *payload, *payload_end;
- int payload_len;
+ u32 payload_len;
char *result_msg;
- int result_msg_len;
+ u32 result_msg_len;
int ret = -EINVAL;
mutex_lock(&ac->mutex);
@@ -217,10 +217,12 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
result = ceph_decode_32(&p);
global_id = ceph_decode_64(&p);
payload_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, payload_len, bad);
payload = p;
p += payload_len;
ceph_decode_need(&p, end, sizeof(u32), bad);
result_msg_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, result_msg_len, bad);
result_msg = p;
p += result_msg_len;
if (p != end)
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 83c29714226f..476d62bdeab8 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -392,7 +392,7 @@ static int head_onwire_len(int ctrl_len, bool secure)
int head_len;
int rem_len;
- BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
+ BUG_ON(ctrl_len < 1 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
if (secure) {
head_len = CEPH_PREAMBLE_SECURE_LEN;
@@ -401,9 +401,7 @@ static int head_onwire_len(int ctrl_len, bool secure)
head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
}
} else {
- head_len = CEPH_PREAMBLE_PLAIN_LEN;
- if (ctrl_len)
- head_len += ctrl_len + CEPH_CRC_LEN;
+ head_len = CEPH_PREAMBLE_PLAIN_LEN + ctrl_len + CEPH_CRC_LEN;
}
return head_len;
}
@@ -528,11 +526,16 @@ static int decode_preamble(void *p, struct ceph_frame_desc *desc)
desc->fd_aligns[i] = ceph_decode_16(&p);
}
- if (desc->fd_lens[0] < 0 ||
+ /*
+ * This would fire for FRAME_TAG_WAIT (it has one empty
+ * segment), but we should never get it as client.
+ */
+ if (desc->fd_lens[0] < 1 ||
desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
pr_err("bad control segment length %d\n", desc->fd_lens[0]);
return -EINVAL;
}
+
if (desc->fd_lens[1] < 0 ||
desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
pr_err("bad front segment length %d\n", desc->fd_lens[1]);
@@ -549,10 +552,6 @@ static int decode_preamble(void *p, struct ceph_frame_desc *desc)
return -EINVAL;
}
- /*
- * This would fire for FRAME_TAG_WAIT (it has one empty
- * segment), but we should never get it as client.
- */
if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
pr_err("last segment empty, segment count %d\n",
desc->fd_seg_cnt);
@@ -2865,12 +2864,15 @@ static int process_message_header(struct ceph_connection *con,
void *p, void *end)
{
struct ceph_frame_desc *desc = &con->v2.in_desc;
- struct ceph_msg_header2 *hdr2 = p;
+ struct ceph_msg_header2 *hdr2;
struct ceph_msg_header hdr;
int skip;
int ret;
u64 seq;
+ ceph_decode_need(&p, end, sizeof(*hdr2), bad);
+ hdr2 = p;
+
/* verify seq# */
seq = le64_to_cpu(hdr2->seq);
if ((s64)seq - (s64)con->in_seq < 1) {
@@ -2901,6 +2903,10 @@ static int process_message_header(struct ceph_connection *con,
WARN_ON(!con->in_msg);
WARN_ON(con->in_msg->con != con);
return 1;
+
+bad:
+ pr_err("failed to decode message header\n");
+ return -EINVAL;
}
static int process_message(struct ceph_connection *con)
@@ -2930,6 +2936,11 @@ static int __handle_control(struct ceph_connection *con, void *p)
if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
return process_control(con, p, end);
+ if (con->state != CEPH_CON_S_OPEN) {
+ con->error_msg = "protocol error, unexpected message";
+ return -EINVAL;
+ }
+
ret = process_message_header(con, p, end);
if (ret < 0)
return ret;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 3909a8156690..ae12f2dbed9e 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -72,8 +72,8 @@ static struct ceph_monmap *ceph_monmap_decode(void **p, void *end, bool msgr2)
struct ceph_monmap *monmap = NULL;
struct ceph_fsid fsid;
u32 struct_len;
- int blob_len;
- int num_mon;
+ u32 blob_len;
+ u32 num_mon;
u8 struct_v;
u32 epoch;
int ret;
@@ -112,7 +112,7 @@ static struct ceph_monmap *ceph_monmap_decode(void **p, void *end, bool msgr2)
}
ceph_decode_32_safe(p, end, num_mon, e_inval);
- dout("%s fsid %pU epoch %u num_mon %d\n", __func__, &fsid, epoch,
+ dout("%s fsid %pU epoch %u num_mon %u\n", __func__, &fsid, epoch,
num_mon);
if (num_mon > CEPH_MAX_MON)
goto e_inval;
diff --git a/net/core/dev.c b/net/core/dev.c
index e7127eca1afc..336257b515f0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -460,7 +460,7 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
* PP consumers must pay attention to run APIs in the appropriate context
* (e.g. NAPI context).
*/
-static DEFINE_PER_CPU(struct page_pool *, system_page_pool);
+DEFINE_PER_CPU(struct page_pool *, system_page_pool);
#ifdef CONFIG_LOCKDEP
/*
@@ -11851,6 +11851,94 @@ netdev_features_t netdev_increment_features(netdev_features_t all,
}
EXPORT_SYMBOL(netdev_increment_features);
+/**
+ * netdev_compute_master_upper_features - compute feature from lowers
+ * @dev: the upper device
+ * @update_header: whether to update upper device's header_len/headroom/tailroom
+ *
+ * Recompute the upper device's feature based on all lower devices.
+ */
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header)
+{
+ unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+ netdev_features_t gso_partial_features = MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES;
+ netdev_features_t xfrm_features = MASTER_UPPER_DEV_XFRM_FEATURES;
+ netdev_features_t mpls_features = MASTER_UPPER_DEV_MPLS_FEATURES;
+ netdev_features_t vlan_features = MASTER_UPPER_DEV_VLAN_FEATURES;
+ netdev_features_t enc_features = MASTER_UPPER_DEV_ENC_FEATURES;
+ unsigned short max_header_len = ETH_HLEN;
+ unsigned int tso_max_size = TSO_MAX_SIZE;
+ unsigned short max_headroom = 0;
+ unsigned short max_tailroom = 0;
+ u16 tso_max_segs = TSO_MAX_SEGS;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ mpls_features = netdev_base_features(mpls_features);
+ vlan_features = netdev_base_features(vlan_features);
+ enc_features = netdev_base_features(enc_features);
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ gso_partial_features = netdev_increment_features(gso_partial_features,
+ lower_dev->gso_partial_features,
+ MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES);
+
+ vlan_features = netdev_increment_features(vlan_features,
+ lower_dev->vlan_features,
+ MASTER_UPPER_DEV_VLAN_FEATURES);
+
+ enc_features = netdev_increment_features(enc_features,
+ lower_dev->hw_enc_features,
+ MASTER_UPPER_DEV_ENC_FEATURES);
+
+ if (IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+ xfrm_features = netdev_increment_features(xfrm_features,
+ lower_dev->hw_enc_features,
+ MASTER_UPPER_DEV_XFRM_FEATURES);
+
+ mpls_features = netdev_increment_features(mpls_features,
+ lower_dev->mpls_features,
+ MASTER_UPPER_DEV_MPLS_FEATURES);
+
+ dst_release_flag &= lower_dev->priv_flags;
+
+ if (update_header) {
+ max_header_len = max(max_header_len, lower_dev->hard_header_len);
+ max_headroom = max(max_headroom, lower_dev->needed_headroom);
+ max_tailroom = max(max_tailroom, lower_dev->needed_tailroom);
+ }
+
+ tso_max_size = min(tso_max_size, lower_dev->tso_max_size);
+ tso_max_segs = min(tso_max_segs, lower_dev->tso_max_segs);
+ }
+
+ dev->gso_partial_features = gso_partial_features;
+ dev->vlan_features = vlan_features;
+ dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ if (IS_ENABLED(CONFIG_XFRM_OFFLOAD))
+ dev->hw_enc_features |= xfrm_features;
+ dev->mpls_features = mpls_features;
+
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ if ((dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
+ dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
+ dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+
+ if (update_header) {
+ dev->hard_header_len = max_header_len;
+ dev->needed_headroom = max_headroom;
+ dev->needed_tailroom = max_tailroom;
+ }
+
+ netif_set_tso_max_segs(dev, tso_max_segs);
+ netif_set_tso_max_size(dev, tso_max_size);
+
+ netdev_change_features(dev);
+}
+EXPORT_SYMBOL(netdev_compute_master_upper_features);
+
static struct hlist_head * __net_init netdev_create_hash(void)
{
int i;
@@ -12137,11 +12225,18 @@ static int net_page_pool_create(int cpuid)
.nid = cpu_to_mem(cpuid),
};
struct page_pool *pp_ptr;
+ int err;
pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
if (IS_ERR(pp_ptr))
return -ENOMEM;
+ err = xdp_reg_page_pool(pp_ptr);
+ if (err) {
+ page_pool_destroy(pp_ptr);
+ return err;
+ }
+
per_cpu(system_page_pool, cpuid) = pp_ptr;
#endif
return 0;
@@ -12275,6 +12370,7 @@ static int __init net_dev_init(void)
if (!pp_ptr)
continue;
+ xdp_unreg_page_pool(pp_ptr);
page_pool_destroy(pp_ptr);
per_cpu(system_page_pool, i) = NULL;
}
diff --git a/net/core/dev.h b/net/core/dev.h
index 764e0097ccf2..e0603dcb6aa1 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -162,41 +162,6 @@ static inline void napi_assert_will_not_race(const struct napi_struct *napi)
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
-#define XMIT_RECURSION_LIMIT 8
-
-#ifndef CONFIG_PREEMPT_RT
-static inline bool dev_xmit_recursion(void)
-{
- return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
- XMIT_RECURSION_LIMIT);
-}
-
-static inline void dev_xmit_recursion_inc(void)
-{
- __this_cpu_inc(softnet_data.xmit.recursion);
-}
-
-static inline void dev_xmit_recursion_dec(void)
-{
- __this_cpu_dec(softnet_data.xmit.recursion);
-}
-#else
-static inline bool dev_xmit_recursion(void)
-{
- return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
-}
-
-static inline void dev_xmit_recursion_inc(void)
-{
- current->net_xmit.recursion++;
-}
-
-static inline void dev_xmit_recursion_dec(void)
-{
- current->net_xmit.recursion--;
-}
-#endif
-
int dev_set_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack);
diff --git a/net/core/dst.c b/net/core/dst.c
index 8dbb54148c03..92aa81b2f331 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -68,6 +68,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
dst->lwtstate = NULL;
rcuref_init(&dst->__rcuref, 1);
INIT_LIST_HEAD(&dst->rt_uncached);
+ dst->rt_uncached_list = NULL;
dst->__use = 0;
dst->lastuse = jiffies;
dst->flags = flags;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 23e7d736718b..8a3ea90e8cf9 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -365,6 +365,62 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
+/**
+ * xdp_reg_page_pool - register &page_pool as a memory provider for XDP
+ * @pool: &page_pool to register
+ *
+ * Can be used to register pools manually without connecting to any XDP RxQ
+ * info, so that the XDP layer will be aware of them. Then, they can be
+ * attached to an RxQ info manually via xdp_rxq_info_attach_page_pool().
+ *
+ * Return: %0 on success, -errno on error.
+ */
+int xdp_reg_page_pool(struct page_pool *pool)
+{
+ struct xdp_mem_info mem;
+
+ return xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
+}
+EXPORT_SYMBOL_GPL(xdp_reg_page_pool);
+
+/**
+ * xdp_unreg_page_pool - unregister &page_pool from the memory providers list
+ * @pool: &page_pool to unregister
+ *
+ * A shorthand for manual unregistering page pools. If the pool was previously
+ * attached to an RxQ info, it must be detached first.
+ */
+void xdp_unreg_page_pool(const struct page_pool *pool)
+{
+ struct xdp_mem_info mem = {
+ .type = MEM_TYPE_PAGE_POOL,
+ .id = pool->xdp_mem_id,
+ };
+
+ xdp_unreg_mem_model(&mem);
+}
+EXPORT_SYMBOL_GPL(xdp_unreg_page_pool);
+
+/**
+ * xdp_rxq_info_attach_page_pool - attach registered pool to RxQ info
+ * @xdp_rxq: XDP RxQ info to attach the pool to
+ * @pool: pool to attach
+ *
+ * If the pool was registered manually, this function must be called instead
+ * of xdp_rxq_info_reg_mem_model() to connect it to the RxQ info.
+ */
+void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
+ const struct page_pool *pool)
+{
+ struct xdp_mem_info mem = {
+ .type = MEM_TYPE_PAGE_POOL,
+ .id = pool->xdp_mem_id,
+ };
+
+ xdp_rxq_info_attach_mem_model(xdp_rxq, &mem);
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_attach_page_pool);
+
/* XDP RX runs under NAPI protection, and in different delivery error
* scenarios (e.g. queue full), it is possible to return the xdp_frame
* while still leveraging this protection. The @napi_direct boolean
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 76a086e846c4..95f87e1f90af 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -1246,14 +1246,25 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
if (ethernet) {
struct net_device *conduit;
const char *user_protocol;
+ int err;
+ rtnl_lock();
conduit = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
- if (!conduit)
+ if (!conduit) {
+ rtnl_unlock();
return -EPROBE_DEFER;
+ }
+
+ netdev_hold(conduit, &dp->conduit_tracker, GFP_KERNEL);
+ put_device(&conduit->dev);
+ rtnl_unlock();
user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
- return dsa_port_parse_cpu(dp, conduit, user_protocol);
+ err = dsa_port_parse_cpu(dp, conduit, user_protocol);
+ if (err)
+ netdev_put(conduit, &dp->conduit_tracker);
+ return err;
}
if (link)
@@ -1386,37 +1397,30 @@ static struct device *dev_find_class(struct device *parent, char *class)
return device_find_child(parent, class, dev_is_class);
}
-static struct net_device *dsa_dev_to_net_device(struct device *dev)
-{
- struct device *d;
-
- d = dev_find_class(dev, "net");
- if (d != NULL) {
- struct net_device *nd;
-
- nd = to_net_dev(d);
- dev_hold(nd);
- put_device(d);
-
- return nd;
- }
-
- return NULL;
-}
-
static int dsa_port_parse(struct dsa_port *dp, const char *name,
struct device *dev)
{
if (!strcmp(name, "cpu")) {
struct net_device *conduit;
+ struct device *d;
+ int err;
- conduit = dsa_dev_to_net_device(dev);
- if (!conduit)
+ rtnl_lock();
+ d = dev_find_class(dev, "net");
+ if (!d) {
+ rtnl_unlock();
return -EPROBE_DEFER;
+ }
- dev_put(conduit);
+ conduit = to_net_dev(d);
+ netdev_hold(conduit, &dp->conduit_tracker, GFP_KERNEL);
+ put_device(d);
+ rtnl_unlock();
- return dsa_port_parse_cpu(dp, conduit, NULL);
+ err = dsa_port_parse_cpu(dp, conduit, NULL);
+ if (err)
+ netdev_put(conduit, &dp->conduit_tracker);
+ return err;
}
if (!strcmp(name, "dsa"))
@@ -1484,6 +1488,9 @@ static void dsa_switch_release_ports(struct dsa_switch *ds)
struct dsa_vlan *v, *n;
dsa_switch_for_each_port_safe(dp, next, ds) {
+ if (dsa_port_is_cpu(dp) && dp->conduit)
+ netdev_put(dp->conduit, &dp->conduit_tracker);
+
/* These are either entries that upper layers lost track of
* (probably due to bugs), or installed through interfaces
* where one does not necessarily have to remove them, like
@@ -1636,8 +1643,10 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
/* Disconnect from further netdevice notifiers on the conduit,
* since netdev_uses_dsa() will now return false.
*/
- dsa_switch_for_each_cpu_port(dp, ds)
+ dsa_switch_for_each_cpu_port(dp, ds) {
dp->conduit->dsa_ptr = NULL;
+ netdev_put(dp->conduit, &dp->conduit_tracker);
+ }
rtnl_unlock();
out:
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 43e211e611b1..ca4e3a01237d 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -193,14 +193,11 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
}
EXPORT_SYMBOL(eth_type_trans);
-/**
- * eth_header_parse - extract hardware address from packet
- * @skb: packet to extract header from
- * @haddr: destination buffer
- */
-int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr)
{
const struct ethhdr *eth = eth_hdr(skb);
+
memcpy(haddr, eth->h_source, ETH_ALEN);
return ETH_ALEN;
}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 6d2c97f8e9ef..4cfd87b28c04 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -748,6 +748,7 @@ config TCP_SIGPOOL
config TCP_AO
bool "TCP: Authentication Option (RFC5925)"
select CRYPTO
+ select CRYPTO_LIB_UTILS
select TCP_SIGPOOL
depends on 64BIT && IPV6 != m # seq-number extension needs WRITE_ONCE(u64)
help
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 8ab51b51cc9b..58feb21ff967 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -877,10 +877,12 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
static bool icmp_tag_validation(int proto)
{
+ const struct net_protocol *ipprot;
bool ok;
rcu_read_lock();
- ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
+ ipprot = rcu_dereference(inet_protos[proto]);
+ ok = ipprot ? ipprot->icmp_strict_tag_validation : false;
rcu_read_unlock();
return ok;
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index be85dbe74ac8..084556b03a2e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -917,7 +917,8 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
return -(t->hlen + sizeof(*iph));
}
-static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+static int ipgre_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr)
{
const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
memcpy(haddr, &iph->saddr, 4);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 8392d304a72e..507f2f9ec400 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -57,6 +57,19 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
struct iphdr *iph;
int err;
+ if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) {
+ if (dev) {
+ net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
+ dev->name);
+ DEV_STATS_INC(dev, tx_errors);
+ }
+ ip_rt_put(rt);
+ kfree_skb(skb);
+ return;
+ }
+
+ dev_xmit_recursion_inc();
+
skb_scrub_packet(skb, xnet);
skb_clear_hash_if_not_l4(skb);
@@ -86,6 +99,8 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
pkt_len = 0;
iptunnel_xmit_stats(dev, pkt_len);
}
+
+ dev_xmit_recursion_dec();
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index ee2e62ac0dcf..e3e3f3ee9a5b 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1992,7 +1992,8 @@ static void nh_hthr_group_rebalance(struct nh_group *nhg)
}
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
- struct nl_info *nlinfo)
+ struct nl_info *nlinfo,
+ struct list_head *deferred_free)
{
struct nh_grp_entry *nhges, *new_nhges;
struct nexthop *nhp = nhge->nh_parent;
@@ -2052,8 +2053,8 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
rcu_assign_pointer(nhp->nh_grp, newg);
list_del(&nhge->nh_list);
- free_percpu(nhge->stats);
nexthop_put(nhge->nh);
+ list_add(&nhge->nh_list, deferred_free);
/* Removal of a NH from a resilient group is notified through
* bucket notifications.
@@ -2073,6 +2074,7 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
struct nl_info *nlinfo)
{
struct nh_grp_entry *nhge, *tmp;
+ LIST_HEAD(deferred_free);
/* If there is nothing to do, let's avoid the costly call to
* synchronize_net()
@@ -2081,10 +2083,16 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
return;
list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
- remove_nh_grp_entry(net, nhge, nlinfo);
+ remove_nh_grp_entry(net, nhge, nlinfo, &deferred_free);
/* make sure all see the newly published array before releasing rtnl */
synchronize_net();
+
+ /* Now safe to free percpu stats — all RCU readers have finished */
+ list_for_each_entry_safe(nhge, tmp, &deferred_free, nh_list) {
+ list_del(&nhge->nh_list);
+ free_percpu(nhge->stats);
+ }
}
static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7579001d5b29..4dce0de6ab89 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1533,9 +1533,9 @@ void rt_add_uncached_list(struct rtable *rt)
void rt_del_uncached_list(struct rtable *rt)
{
- if (!list_empty(&rt->dst.rt_uncached)) {
- struct uncached_list *ul = rt->dst.rt_uncached_list;
+ struct uncached_list *ul = rt->dst.rt_uncached_list;
+ if (ul) {
spin_lock_bh(&ul->lock);
list_del_init(&rt->dst.rt_uncached);
spin_unlock_bh(&ul->lock);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4090107b0c4d..c532803b9957 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -244,6 +244,7 @@
#define pr_fmt(fmt) "TCP: " fmt
#include <crypto/hash.h>
+#include <crypto/utils.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -4783,7 +4784,7 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
else
genhash = tp->af_specific->calc_md5_hash(newhash, key,
NULL, skb);
- if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+ if (genhash || crypto_memneq(hash_location, newhash, 16)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
trace_tcp_hash_md5_mismatch(sk, skb);
return SKB_DROP_REASON_TCP_MD5FAILURE;
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 3338b6cc85c4..72957523c2ec 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) "TCP: " fmt
#include <crypto/hash.h>
+#include <crypto/utils.h>
#include <linux/inetdevice.h>
#include <linux/tcp.h>
@@ -923,7 +924,7 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
/* XXX: make it per-AF callback? */
tcp_ao_hash_skb(family, hash_buf, key, sk, skb, traffic_key,
(phash - (u8 *)th), sne);
- if (memcmp(phash, hash_buf, maclen)) {
+ if (crypto_memneq(phash, hash_buf, maclen)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
atomic64_inc(&info->counters.pkt_bad);
atomic64_inc(&key->pkt_bad);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1572562b0498..6e896f1641af 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -82,6 +82,7 @@
#include <linux/skbuff_ref.h>
#include <crypto/hash.h>
+#include <crypto/utils.h>
#include <linux/scatterlist.h>
#include <trace/events/tcp.h>
@@ -839,7 +840,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb,
genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
- if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
+ if (genhash || crypto_memneq(md5_hash_location, newhash, 16))
goto out;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 24b68e99636e..0aedb20072ac 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -267,35 +267,36 @@ bool ip6_autoflowlabel(struct net *net, const struct sock *sk)
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
__u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
{
- struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
- struct net_device *dev = dst_dev(dst);
struct inet6_dev *idev = ip6_dst_idev(dst);
struct hop_jumbo_hdr *hop_jumbo;
int hoplen = sizeof(*hop_jumbo);
+ struct net *net = sock_net(sk);
unsigned int head_room;
+ struct net_device *dev;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
- int hlimit = -1;
+ int ret, hlimit = -1;
u32 mtu;
+ rcu_read_lock();
+
+ dev = dst_dev_rcu(dst);
head_room = sizeof(struct ipv6hdr) + hoplen + LL_RESERVED_SPACE(dev);
if (opt)
head_room += opt->opt_nflen + opt->opt_flen;
if (unlikely(head_room > skb_headroom(skb))) {
- /* Make sure idev stays alive */
- rcu_read_lock();
+ /* idev stays alive while we hold rcu_read_lock(). */
skb = skb_expand_head(skb, head_room);
if (!skb) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
- rcu_read_unlock();
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto unlock;
}
- rcu_read_unlock();
}
if (opt) {
@@ -357,17 +358,21 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
* skb to its handler for processing
*/
skb = l3mdev_ip6_out((struct sock *)sk, skb);
- if (unlikely(!skb))
- return 0;
+ if (unlikely(!skb)) {
+ ret = 0;
+ goto unlock;
+ }
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
- return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
- net, (struct sock *)sk, skb, NULL, dev,
- dst_output);
+ ret = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+ net, (struct sock *)sk, skb, NULL, dev,
+ dst_output);
+ goto unlock;
}
+ ret = -EMSGSIZE;
skb->dev = dev;
/* ipv6_local_error() does not require socket lock,
* we promote our socket to non const
@@ -376,7 +381,9 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
IP6_INC_STATS(net, idev, IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
- return -EMSGSIZE;
+unlock:
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL(ip6_xmit);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7b9279d4c363..36324d1905f8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -148,9 +148,9 @@ void rt6_uncached_list_add(struct rt6_info *rt)
void rt6_uncached_list_del(struct rt6_info *rt)
{
- if (!list_empty(&rt->dst.rt_uncached)) {
- struct uncached_list *ul = rt->dst.rt_uncached_list;
+ struct uncached_list *ul = rt->dst.rt_uncached_list;
+ if (ul) {
spin_lock_bh(&ul->lock);
list_del_init(&rt->dst.rt_uncached);
spin_unlock_bh(&ul->lock);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c90b20c218bf..a79cd20d3d31 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -66,6 +66,7 @@
#include <linux/seq_file.h>
#include <crypto/hash.h>
+#include <crypto/utils.h>
#include <linux/scatterlist.h>
#include <trace/events/tcp.h>
@@ -1084,7 +1085,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
key.type = TCP_KEY_MD5;
genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
- if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
+ if (genhash || crypto_memneq(md5_hash_location, newhash, 16))
goto out;
}
#endif
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 95ec5f0b8324..fa4bb78102f7 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -423,14 +423,16 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list,
list) {
- struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_sub_if_data *sdata;
enum ieee80211_sta_rx_bandwidth new_sta_bw;
unsigned int link_id;
if (!ieee80211_sdata_running(sta->sdata))
continue;
- for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) {
+ sdata = get_bss_sdata(sta->sdata);
+
+ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
struct ieee80211_link_data *link =
rcu_dereference(sdata->link[link_id]);
struct ieee80211_bss_conf *link_conf;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index a0710ae0e7a4..e9b3b2c7b6fa 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -327,7 +327,6 @@ static ssize_t aql_enable_read(struct file *file, char __user *user_buf,
static ssize_t aql_enable_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- bool aql_disabled = static_key_false(&aql_disable.key);
char buf[3];
size_t len;
@@ -342,15 +341,12 @@ static ssize_t aql_enable_write(struct file *file, const char __user *user_buf,
if (len > 0 && buf[len - 1] == '\n')
buf[len - 1] = 0;
- if (buf[0] == '0' && buf[1] == '\0') {
- if (!aql_disabled)
- static_branch_inc(&aql_disable);
- } else if (buf[0] == '1' && buf[1] == '\0') {
- if (aql_disabled)
- static_branch_dec(&aql_disable);
- } else {
+ if (buf[0] == '0' && buf[1] == '\0')
+ static_branch_enable(&aql_disable);
+ else if (buf[0] == '1' && buf[1] == '\0')
+ static_branch_disable(&aql_disable);
+ else
return -EINVAL;
- }
return count;
}
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index 28ce41356341..df303496914c 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -283,6 +283,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS];
struct ieee80211_link_data *old_data[IEEE80211_MLD_MAX_NUM_LINKS];
bool use_deflink = old_links == 0; /* set for error case */
+ bool non_sta = sdata->vif.type != NL80211_IFTYPE_STATION;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -339,6 +340,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
link = links[link_id];
ieee80211_link_init(sdata, link_id, &link->data, &link->conf);
ieee80211_link_setup(&link->data);
+ ieee80211_set_wmm_default(&link->data, true, non_sta);
}
if (new_links == 0)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 00bdf36e333e..253f4b064284 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -78,6 +78,9 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
* - MDA enabled
* - Power management control on fc
*/
+ if (!ie->mesh_config)
+ return false;
+
if (!(ifmsh->mesh_id_len == ie->mesh_id_len &&
memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
(ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 9e4631fade90..000be60d9580 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -469,7 +469,9 @@ static int mac802154_header_create(struct sk_buff *skb,
}
static int
-mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+mac802154_header_parse(const struct sk_buff *skb,
+ const struct net_device *dev,
+ unsigned char *haddr)
{
struct ieee802154_hdr hdr;
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 19ff259d7bc4..08bbd861dc42 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -306,6 +306,7 @@ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
{
struct mctp_sk_key *key;
struct mctp_flow *flow;
+ unsigned long flags;
flow = skb_ext_find(skb, SKB_EXT_MCTP);
if (!flow)
@@ -313,12 +314,14 @@ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev)
key = flow->key;
- if (key->dev) {
+ spin_lock_irqsave(&key->lock, flags);
+
+ if (!key->dev)
+ mctp_dev_set_key(dev, key);
+ else
WARN_ON(key->dev != dev);
- return;
- }
- mctp_dev_set_key(dev, key);
+ spin_unlock_irqrestore(&key->lock, flags);
}
#else
static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 3373b6b34dc7..719dabb76ea2 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2774,6 +2774,7 @@ static int __init mpls_init(void)
out_unregister_rtnl_af:
rtnl_af_unregister(&mpls_af_ops);
dev_remove_pack(&mpls_packet_type);
+ unregister_netdevice_notifier(&mpls_dev_notifier);
out_unregister_pernet:
unregister_pernet_subsys(&mpls_net_ops);
goto out;
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 1b7541206a70..8d2c27c43ee0 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -56,7 +56,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_
msk->pm.rm_list_tx = *rm_list;
rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
WRITE_ONCE(msk->pm.addr_signal, rm_addr);
- mptcp_pm_nl_addr_send_ack(msk);
+ mptcp_pm_nl_addr_send_ack_avoid_list(msk, rm_list);
return 0;
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 864c26e22b24..c847335ffab9 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -661,6 +661,15 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
}
exit:
+ /* If an endpoint has both the signal and subflow flags, but it is not
+ * possible to create subflows -- the 'while' loop body above never
+ * executed -- then still mark the endp as used, which is somehow the
+ * case. This avoids issues later when removing the endpoint and calling
+ * __mark_subflow_endp_available(), which expects the increment here.
+ */
+ if (signal_and_subflow && local.addr.id != msk->mpc_endpoint_id)
+ msk->pm.local_addr_used++;
+
mptcp_pm_nl_check_work_pending(msk);
}
@@ -846,9 +855,23 @@ bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
}
-void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+static bool subflow_in_rm_list(const struct mptcp_subflow_context *subflow,
+ const struct mptcp_rm_list *rm_list)
{
- struct mptcp_subflow_context *subflow;
+ u8 i, id = subflow_get_local_id(subflow);
+
+ for (i = 0; i < rm_list->nr; i++) {
+ if (rm_list->ids[i] == id)
+ return true;
+ }
+
+ return false;
+}
+
+void mptcp_pm_nl_addr_send_ack_avoid_list(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list)
+{
+ struct mptcp_subflow_context *subflow, *same_id = NULL;
msk_owned_by_me(msk);
lockdep_assert_held(&msk->pm.lock);
@@ -858,11 +881,30 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
return;
mptcp_for_each_subflow(msk, subflow) {
- if (__mptcp_subflow_active(subflow)) {
- mptcp_pm_send_ack(msk, subflow, false, false);
- break;
+ if (!__mptcp_subflow_active(subflow))
+ continue;
+
+ if (unlikely(rm_list &&
+ subflow_in_rm_list(subflow, rm_list))) {
+ if (!same_id)
+ same_id = subflow;
+ } else {
+ goto send_ack;
}
}
+
+ if (same_id)
+ subflow = same_id;
+ else
+ return;
+
+send_ack:
+ mptcp_pm_send_ack(msk, subflow, false, false);
+}
+
+void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+{
+ mptcp_pm_nl_addr_send_ack_avoid_list(msk, NULL);
}
int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
@@ -1557,10 +1599,8 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
ret = remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
spin_lock_bh(&msk->pm.lock);
- if (ret) {
- __set_bit(addr->id, msk->pm.id_avail_bitmap);
+ if (ret)
msk->pm.add_addr_signaled--;
- }
mptcp_pm_remove_addr(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
@@ -1598,17 +1638,15 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
!(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
list.ids[0] = mptcp_endp_get_local_id(msk, addr);
- if (remove_subflow) {
- spin_lock_bh(&msk->pm.lock);
- mptcp_pm_nl_rm_subflow_received(msk, &list);
- spin_unlock_bh(&msk->pm.lock);
- }
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
- spin_lock_bh(&msk->pm.lock);
+ spin_lock_bh(&msk->pm.lock);
+ if (remove_subflow)
+ mptcp_pm_nl_rm_subflow_received(msk, &list);
+ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
__mark_subflow_endp_available(msk, list.ids[0]);
- spin_unlock_bh(&msk->pm.lock);
- }
+ else /* mark endp ID as available, e.g. Signal or MPC endp */
+ __set_bit(addr->id, msk->pm.id_avail_bitmap);
+ spin_unlock_bh(&msk->pm.lock);
if (msk->mpc_endpoint_id == entry->addr.id)
msk->mpc_endpoint_id = 0;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index b266002660d7..669991bbae75 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -1029,6 +1029,8 @@ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
const struct mptcp_addr_info *remote);
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+void mptcp_pm_nl_addr_send_ack_avoid_list(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup);
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 62fb1031763d..040a31557201 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -224,7 +224,8 @@ int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
if (!nah) {
netdev_warn(ndp->ndev.dev, "Invalid AEN (0x%x) received\n",
h->type);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out;
}
ret = ncsi_validate_aen_pkt(h, nah->payload);
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index d5ed80731e89..0be1059371de 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -1176,8 +1176,10 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
/* Find the NCSI device */
nd = ncsi_find_dev(orig_dev);
ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
- if (!ndp)
- return -ENODEV;
+ if (!ndp) {
+ ret = -ENODEV;
+ goto err_free_skb;
+ }
/* Check if it is AEN packet */
hdr = (struct ncsi_pkt_hdr *)skb_network_header(skb);
@@ -1199,7 +1201,8 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
if (!nrh) {
netdev_err(nd->dev, "Received unrecognized packet (0x%x)\n",
hdr->type);
- return -ENOENT;
+ ret = -ENOENT;
+ goto err_free_skb;
}
/* Associate with the request */
@@ -1207,7 +1210,8 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
nr = &ndp->requests[hdr->id];
if (!nr->used) {
spin_unlock_irqrestore(&ndp->lock, flags);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_free_skb;
}
nr->rsp = skb;
@@ -1261,4 +1265,8 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
out:
ncsi_free_request(nr);
return ret;
+
+err_free_skb:
+ kfree_skb(skb);
+ return ret;
}
diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
index b5e4ca9026a8..be5e8bd90a3e 100644
--- a/net/netfilter/nf_bpf_link.c
+++ b/net/netfilter/nf_bpf_link.c
@@ -170,7 +170,7 @@ static int bpf_nf_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
static const struct bpf_link_ops bpf_nf_link_lops = {
.release = bpf_nf_link_release,
- .dealloc = bpf_nf_link_dealloc,
+ .dealloc_deferred = bpf_nf_link_dealloc,
.detach = bpf_nf_link_detach,
.show_fdinfo = bpf_nf_link_show_info,
.fill_link_info = bpf_nf_link_fill_link_info,
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 62aa22a07876..7b1497ed97d2 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -331,6 +331,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
if (nf_h323_error_boundary(bs, 0, 2))
return H323_ERROR_BOUND;
len = get_bits(bs, 2) + 1;
+ if (nf_h323_error_boundary(bs, len, 0))
+ return H323_ERROR_BOUND;
BYTE_ALIGN(bs);
if (base && (f->attr & DECODE)) { /* timeToLive */
unsigned int v = get_uint(bs, len) + f->lb;
@@ -922,6 +924,8 @@ int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
break;
p++;
len--;
+ if (len <= 0)
+ break;
return DecodeH323_UserInformation(buf, p, len,
&q931->UUIE);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 18a91c031554..627790fcb6bb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3146,23 +3146,27 @@ ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item)
return 0;
}
#endif
-static int ctnetlink_exp_done(struct netlink_callback *cb)
+
+static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp)
{
- if (cb->args[1])
- nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
- return 0;
+ unsigned long id = (unsigned long)exp;
+
+ id += nf_ct_get_id(exp->master);
+ id += exp->class;
+
+ return id ? id : 1;
}
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
+ unsigned long last_id = cb->args[1];
+ struct nf_conntrack_expect *exp;
rcu_read_lock();
- last = (struct nf_conntrack_expect *)cb->args[1];
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
@@ -3174,7 +3178,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
continue;
if (cb->args[1]) {
- if (exp != last)
+ if (ctnetlink_exp_id(exp) != last_id)
continue;
cb->args[1] = 0;
}
@@ -3183,9 +3187,7 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
- if (!refcount_inc_not_zero(&exp->use))
- continue;
- cb->args[1] = (unsigned long)exp;
+ cb->args[1] = ctnetlink_exp_id(exp);
goto out;
}
}
@@ -3196,32 +3198,34 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
}
out:
rcu_read_unlock();
- if (last)
- nf_ct_expect_put(last);
-
return skb->len;
}
static int
ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nf_conn *ct = cb->data;
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_conn_help *help;
u_int8_t l3proto = nfmsg->nfgen_family;
+ unsigned long last_id = cb->args[1];
+ struct nf_conntrack_expect *exp;
if (cb->args[0])
return 0;
+ help = nfct_help(ct);
+ if (!help)
+ return 0;
+
rcu_read_lock();
- last = (struct nf_conntrack_expect *)cb->args[1];
+
restart:
hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
if (cb->args[1]) {
- if (exp != last)
+ if (ctnetlink_exp_id(exp) != last_id)
continue;
cb->args[1] = 0;
}
@@ -3229,9 +3233,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
- if (!refcount_inc_not_zero(&exp->use))
- continue;
- cb->args[1] = (unsigned long)exp;
+ cb->args[1] = ctnetlink_exp_id(exp);
goto out;
}
}
@@ -3242,12 +3244,27 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[0] = 1;
out:
rcu_read_unlock();
- if (last)
- nf_ct_expect_put(last);
-
return skb->len;
}
+static int ctnetlink_dump_exp_ct_start(struct netlink_callback *cb)
+{
+ struct nf_conn *ct = cb->data;
+
+ if (!refcount_inc_not_zero(&ct->ct_general.use))
+ return -ENOENT;
+ return 0;
+}
+
+static int ctnetlink_dump_exp_ct_done(struct netlink_callback *cb)
+{
+ struct nf_conn *ct = cb->data;
+
+ if (ct)
+ nf_ct_put(ct);
+ return 0;
+}
+
static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
struct sk_buff *skb,
const struct nlmsghdr *nlh,
@@ -3263,7 +3280,8 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
struct nf_conntrack_zone zone;
struct netlink_dump_control c = {
.dump = ctnetlink_exp_ct_dump_table,
- .done = ctnetlink_exp_done,
+ .start = ctnetlink_dump_exp_ct_start,
+ .done = ctnetlink_dump_exp_ct_done,
};
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
@@ -3313,7 +3331,6 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
else {
struct netlink_dump_control c = {
.dump = ctnetlink_exp_dump_table,
- .done = ctnetlink_exp_done,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index d0eac27f6ba0..657839a58782 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1534,11 +1534,12 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
{
struct tcphdr *th, _tcph;
unsigned int dataoff, datalen;
- unsigned int matchoff, matchlen, clen;
+ unsigned int matchoff, matchlen;
unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
int ret = NF_ACCEPT;
+ unsigned long clen;
bool term;
if (ctinfo != IP_CT_ESTABLISHED &&
@@ -1573,6 +1574,9 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
if (dptr + matchoff == end)
break;
+ if (clen > datalen)
+ break;
+
term = false;
for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) {
if (end[0] == '\r' && end[1] == '\n' &&
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c1b9b00907bb..663c06413518 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -700,7 +700,6 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
nft_set_elem_change_active(ctx->net, set, ext);
nft_setelem_data_deactivate(ctx->net, set, catchall->elem);
- break;
}
}
@@ -5706,7 +5705,6 @@ static void nft_map_catchall_activate(const struct nft_ctx *ctx,
nft_clear(ctx->net, ext);
nft_setelem_data_activate(ctx->net, set, catchall->elem);
- break;
}
}
@@ -6639,8 +6637,8 @@ static void __nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
}
}
-static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
- struct nft_set_elem_expr *elem_expr)
+void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ struct nft_set_elem_expr *elem_expr)
{
struct nft_expr *expr;
u32 size;
@@ -8968,6 +8966,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
return 0;
err_flowtable_hooks:
+ synchronize_rcu();
nft_trans_destroy(trans);
err_flowtable_trans:
nft_hooks_destroy(&flowtable->hook_list);
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 97248963a7d3..71a248cca746 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -603,10 +603,10 @@ nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
goto out;
}
}
- }
- if (cb->args[1]) {
- cb->args[1] = 0;
- goto restart;
+ if (cb->args[1]) {
+ cb->args[1] = 0;
+ goto restart;
+ }
}
out:
rcu_read_unlock();
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index c0fc431991e8..9fc9544d4bc5 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -302,7 +302,9 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,
{
struct nf_osf_user_finger *f;
struct nf_osf_finger *kf = NULL, *sf;
+ unsigned int tot_opt_len = 0;
int err = 0;
+ int i;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -318,6 +320,17 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,
if (f->opt_num > ARRAY_SIZE(f->opt))
return -EINVAL;
+ for (i = 0; i < f->opt_num; i++) {
+ if (!f->opt[i].length || f->opt[i].length > MAX_IPOPTLEN)
+ return -EINVAL;
+ if (f->opt[i].kind == OSFOPT_MSS && f->opt[i].length < 4)
+ return -EINVAL;
+
+ tot_opt_len += f->opt[i].length;
+ if (tot_opt_len > MAX_IPOPTLEN)
+ return -EINVAL;
+ }
+
if (!memchr(f->genre, 0, MAXGENRELEN) ||
!memchr(f->subtype, 0, MAXGENRELEN) ||
!memchr(f->version, 0, MAXGENRELEN))
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index af35dbc19864..df0232cf24ce 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1547,8 +1547,10 @@ static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
if (entry->state.pf == PF_BRIDGE) {
err = nfqa_parse_bridge(entry, nfqa);
- if (err < 0)
+ if (err < 0) {
+ nfqnl_reinject(entry, NF_DROP);
return err;
+ }
}
if (nfqa[NFQA_PAYLOAD]) {
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 58a6ad7ed7a4..e361de439b77 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
+#include "nf_internals.h"
struct nft_ct_helper_obj {
struct nf_conntrack_helper *helper4;
@@ -527,6 +528,7 @@ static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
case NFT_CT_ZONE:
+ nf_queue_nf_hook_drop(ctx->net);
mutex_lock(&nft_ct_pcpu_mutex);
if (--nft_ct_pcpu_template_refcnt == 0)
nft_ct_tmpl_put_pcpu();
@@ -997,6 +999,7 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_ct_timeout *timeout = priv->timeout;
+ nf_queue_nf_hook_drop(ctx->net);
nf_ct_untimeout(ctx->net, timeout);
nf_ct_netns_put(ctx->net, ctx->family);
kfree(priv->timeout);
@@ -1129,6 +1132,7 @@ static void nft_ct_helper_obj_destroy(const struct nft_ctx *ctx,
{
struct nft_ct_helper_obj *priv = nft_obj_data(obj);
+ nf_queue_nf_hook_drop(ctx->net);
if (priv->helper4)
nf_conntrack_helper_put(priv->helper4);
if (priv->helper6)
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index e24493d9e776..0b3c4f6a8dec 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -30,18 +30,26 @@ static int nft_dynset_expr_setup(const struct nft_dynset *priv,
const struct nft_set_ext *ext)
{
struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
+ struct nft_ctx ctx = {
+ .net = read_pnet(&priv->set->net),
+ .family = priv->set->table->family,
+ };
struct nft_expr *expr;
int i;
for (i = 0; i < priv->num_exprs; i++) {
expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
if (nft_expr_clone(expr, priv->expr_array[i], GFP_ATOMIC) < 0)
- return -1;
+ goto err_out;
elem_expr->size += priv->expr_array[i]->ops->size;
}
return 0;
+err_out:
+ nft_set_elem_expr_destroy(&ctx, elem_expr);
+
+ return -1;
}
static struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index ab5045bf3e59..a2dd1212e0f0 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1627,6 +1627,7 @@ static void pipapo_drop(struct nft_pipapo_match *m,
int i;
nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
int g;
for (g = 0; g < f->groups; g++) {
@@ -1646,7 +1647,7 @@ static void pipapo_drop(struct nft_pipapo_match *m,
}
pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n,
- rulemap[i + 1].n, i == m->field_count - 1);
+ last ? 0 : rulemap[i + 1].n, last);
if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) {
/* We can ignore this, a failure to shrink tables down
* doesn't make tables invalid.
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 3ba94c34297c..498f5871c84a 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -16,6 +16,7 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include "nf_internals.h"
static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
{
@@ -283,6 +284,9 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
struct nf_conn_help *help;
if (ct) {
+ if (info->helper[0] || info->timeout[0])
+ nf_queue_nf_hook_drop(par->net);
+
help = nfct_help(ct);
xt_ct_put_helper(help);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 9869ef3c2ab3..92a8289b1cb3 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -320,6 +320,12 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
+ if (info->timer->timer_type & XT_IDLETIMER_ALARM) {
+ pr_debug("Adding/Replacing rule with same label and different timer type is not allowed\n");
+ mutex_unlock(&list_mutex);
+ return -EINVAL;
+ }
+
info->timer->refcnt++;
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
index e5a13ecbe67a..037ab93e25d0 100644
--- a/net/netfilter/xt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -62,10 +62,10 @@ dccp_find_option(u_int8_t option,
return true;
}
- if (op[i] < 2)
+ if (op[i] < 2 || i == optlen - 1)
i++;
else
- i += op[i+1]?:1;
+ i += op[i + 1] ? : 1;
}
spin_unlock_bh(&dccp_buflock);
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index e8991130a3de..f76cf18f1a24 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -59,8 +59,10 @@ tcp_find_option(u_int8_t option,
for (i = 0; i < optlen; ) {
if (op[i] == option) return !invert;
- if (op[i] < 2) i++;
- else i += op[i+1]?:1;
+ if (op[i] < 2 || i == optlen - 1)
+ i++;
+ else
+ i += op[i + 1] ? : 1;
}
return invert;
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 6aa12d0f54e2..61de85e02a40 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -227,13 +227,13 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
localtime_2(¤t_time, stamp);
- if (!(info->weekdays_match & (1 << current_time.weekday)))
+ if (!(info->weekdays_match & (1U << current_time.weekday)))
return false;
/* Do not spend time computing monthday if all days match anyway */
if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) {
localtime_3(¤t_time, stamp);
- if (!(info->monthdays_match & (1 << current_time.monthday)))
+ if (!(info->monthdays_match & (1U << current_time.monthday)))
return false;
}
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index a27efa4faa4e..532ee4e10ba9 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -129,9 +129,12 @@ static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
return 1;
}
-static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+static int pn_header_parse(const struct sk_buff *skb,
+ const struct net_device *dev,
+ unsigned char *haddr)
{
const u8 *media = skb_mac_header(skb);
+
*haddr = *media;
return 1;
}
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 1676c9f4ab84..0223d6c34f0b 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -810,6 +810,11 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
goto out_release;
}
+ if (sk->sk_state == TCP_SYN_SENT) {
+ err = -EALREADY;
+ goto out_release;
+ }
+
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index e24a44bae9a3..b6e524c065f0 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -430,7 +430,8 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (rxrpc_call_has_failed(call))
goto call_failed;
- if (!skb_queue_empty(&call->recvmsg_queue))
+ if (!(flags & MSG_PEEK) &&
+ !skb_queue_empty(&call->recvmsg_queue))
rxrpc_notify_socket(call);
goto not_yet_complete;
@@ -461,11 +462,21 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
error_requeue_call:
if (!(flags & MSG_PEEK)) {
spin_lock(&rx->recvmsg_lock);
- list_add(&call->recvmsg_link, &rx->recvmsg_q);
- spin_unlock(&rx->recvmsg_lock);
+ if (list_empty(&call->recvmsg_link)) {
+ list_add(&call->recvmsg_link, &rx->recvmsg_q);
+ rxrpc_see_call(call, rxrpc_call_see_recvmsg_requeue);
+ spin_unlock(&rx->recvmsg_lock);
+ } else if (list_is_first(&call->recvmsg_link, &rx->recvmsg_q)) {
+ spin_unlock(&rx->recvmsg_lock);
+ rxrpc_put_call(call, rxrpc_call_see_recvmsg_requeue_first);
+ } else {
+ list_move(&call->recvmsg_link, &rx->recvmsg_q);
+ spin_unlock(&rx->recvmsg_lock);
+ rxrpc_put_call(call, rxrpc_call_see_recvmsg_requeue_move);
+ }
trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
} else {
- rxrpc_put_call(call, rxrpc_call_put_recvmsg);
+ rxrpc_put_call(call, rxrpc_call_put_recvmsg_peek_nowait);
}
error_no_call:
release_sock(&rx->sk);
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 1dd74125398a..58cae012a439 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -32,9 +32,12 @@ static ktime_t gate_get_time(struct tcf_gate *gact)
return KTIME_MAX;
}
-static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void tcf_gate_params_free_rcu(struct rcu_head *head);
+
+static void gate_get_start_time(struct tcf_gate *gact,
+ const struct tcf_gate_params *param,
+ ktime_t *start)
{
- struct tcf_gate_params *param = &gact->param;
ktime_t now, base, cycle;
u64 n;
@@ -69,12 +72,14 @@ static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
struct tcf_gate *gact = container_of(timer, struct tcf_gate,
hitimer);
- struct tcf_gate_params *p = &gact->param;
struct tcfg_gate_entry *next;
+ struct tcf_gate_params *p;
ktime_t close_time, now;
spin_lock(&gact->tcf_lock);
+ p = rcu_dereference_protected(gact->param,
+ lockdep_is_held(&gact->tcf_lock));
next = gact->next_entry;
/* cycle start, clear pending bit, clear total octets */
@@ -230,6 +235,35 @@ static void release_entry_list(struct list_head *entries)
}
}
+static int tcf_gate_copy_entries(struct tcf_gate_params *dst,
+ const struct tcf_gate_params *src,
+ struct netlink_ext_ack *extack)
+{
+ struct tcfg_gate_entry *entry;
+ int i = 0;
+
+ list_for_each_entry(entry, &src->entries, list) {
+ struct tcfg_gate_entry *new;
+
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ NL_SET_ERR_MSG(extack, "Not enough memory for entry");
+ return -ENOMEM;
+ }
+
+ new->index = entry->index;
+ new->gate_state = entry->gate_state;
+ new->interval = entry->interval;
+ new->ipv = entry->ipv;
+ new->maxoctets = entry->maxoctets;
+ list_add_tail(&new->list, &dst->entries);
+ i++;
+ }
+
+ dst->num_entries = i;
+ return 0;
+}
+
static int parse_gate_list(struct nlattr *list_attr,
struct tcf_gate_params *sched,
struct netlink_ext_ack *extack)
@@ -275,23 +309,42 @@ static int parse_gate_list(struct nlattr *list_attr,
return err;
}
-static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
- enum tk_offsets tko, s32 clockid,
- bool do_init)
+static bool gate_timer_needs_cancel(u64 basetime, u64 old_basetime,
+ enum tk_offsets tko,
+ enum tk_offsets old_tko,
+ s32 clockid, s32 old_clockid)
{
- if (!do_init) {
- if (basetime == gact->param.tcfg_basetime &&
- tko == gact->tk_offset &&
- clockid == gact->param.tcfg_clockid)
- return;
+ return basetime != old_basetime ||
+ clockid != old_clockid ||
+ tko != old_tko;
+}
- spin_unlock_bh(&gact->tcf_lock);
- hrtimer_cancel(&gact->hitimer);
- spin_lock_bh(&gact->tcf_lock);
+static int gate_clock_resolve(s32 clockid, enum tk_offsets *tko,
+ struct netlink_ext_ack *extack)
+{
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ *tko = TK_OFFS_REAL;
+ return 0;
+ case CLOCK_MONOTONIC:
+ *tko = TK_OFFS_MAX;
+ return 0;
+ case CLOCK_BOOTTIME:
+ *tko = TK_OFFS_BOOT;
+ return 0;
+ case CLOCK_TAI:
+ *tko = TK_OFFS_TAI;
+ return 0;
+ default:
+ NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+ return -EINVAL;
}
- gact->param.tcfg_basetime = basetime;
- gact->param.tcfg_clockid = clockid;
- gact->tk_offset = tko;
+}
+
+static void gate_setup_timer(struct tcf_gate *gact, s32 clockid,
+ enum tk_offsets tko)
+{
+ WRITE_ONCE(gact->tk_offset, tko);
hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
gact->hitimer.function = gate_timer_func;
}
@@ -302,15 +355,22 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
- enum tk_offsets tk_offset = TK_OFFS_TAI;
+ u64 cycletime = 0, basetime = 0, cycletime_ext = 0;
+ struct tcf_gate_params *p = NULL, *old_p = NULL;
+ enum tk_offsets old_tk_offset = TK_OFFS_TAI;
+ const struct tcf_gate_params *cur_p = NULL;
bool bind = flags & TCA_ACT_FLAGS_BIND;
struct nlattr *tb[TCA_GATE_MAX + 1];
+ enum tk_offsets tko = TK_OFFS_TAI;
struct tcf_chain *goto_ch = NULL;
- u64 cycletime = 0, basetime = 0;
- struct tcf_gate_params *p;
+ s32 timer_clockid = CLOCK_TAI;
+ bool use_old_entries = false;
+ s32 old_clockid = CLOCK_TAI;
+ bool need_cancel = false;
s32 clockid = CLOCK_TAI;
struct tcf_gate *gact;
struct tc_gate *parm;
+ u64 old_basetime = 0;
int ret = 0, err;
u32 gflags = 0;
s32 prio = -1;
@@ -327,26 +387,8 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_GATE_PARMS])
return -EINVAL;
- if (tb[TCA_GATE_CLOCKID]) {
+ if (tb[TCA_GATE_CLOCKID])
clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
- switch (clockid) {
- case CLOCK_REALTIME:
- tk_offset = TK_OFFS_REAL;
- break;
- case CLOCK_MONOTONIC:
- tk_offset = TK_OFFS_MAX;
- break;
- case CLOCK_BOOTTIME:
- tk_offset = TK_OFFS_BOOT;
- break;
- case CLOCK_TAI:
- tk_offset = TK_OFFS_TAI;
- break;
- default:
- NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
- return -EINVAL;
- }
- }
parm = nla_data(tb[TCA_GATE_PARMS]);
index = parm->index;
@@ -372,6 +414,60 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
return -EEXIST;
}
+ gact = to_gate(*a);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+ goto release_idr;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ err = -ENOMEM;
+ goto chain_put;
+ }
+ INIT_LIST_HEAD(&p->entries);
+
+ use_old_entries = !tb[TCA_GATE_ENTRY_LIST];
+ if (!use_old_entries) {
+ err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
+ if (err < 0)
+ goto err_free;
+ use_old_entries = !err;
+ }
+
+ if (ret == ACT_P_CREATED && use_old_entries) {
+ NL_SET_ERR_MSG(extack, "The entry list is empty");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (ret != ACT_P_CREATED) {
+ rcu_read_lock();
+ cur_p = rcu_dereference(gact->param);
+
+ old_basetime = cur_p->tcfg_basetime;
+ old_clockid = cur_p->tcfg_clockid;
+ old_tk_offset = READ_ONCE(gact->tk_offset);
+
+ basetime = old_basetime;
+ cycletime_ext = cur_p->tcfg_cycletime_ext;
+ prio = cur_p->tcfg_priority;
+ gflags = cur_p->tcfg_flags;
+
+ if (!tb[TCA_GATE_CLOCKID])
+ clockid = old_clockid;
+
+ err = 0;
+ if (use_old_entries) {
+ err = tcf_gate_copy_entries(p, cur_p, extack);
+ if (!err && !tb[TCA_GATE_CYCLE_TIME])
+ cycletime = cur_p->tcfg_cycletime;
+ }
+ rcu_read_unlock();
+ if (err)
+ goto err_free;
+ }
+
if (tb[TCA_GATE_PRIORITY])
prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
@@ -381,25 +477,26 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (tb[TCA_GATE_FLAGS])
gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
- gact = to_gate(*a);
- if (ret == ACT_P_CREATED)
- INIT_LIST_HEAD(&gact->param.entries);
+ if (tb[TCA_GATE_CYCLE_TIME])
+ cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
- err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
- if (err < 0)
- goto release_idr;
+ if (tb[TCA_GATE_CYCLE_TIME_EXT])
+ cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
- spin_lock_bh(&gact->tcf_lock);
- p = &gact->param;
+ err = gate_clock_resolve(clockid, &tko, extack);
+ if (err)
+ goto err_free;
+ timer_clockid = clockid;
- if (tb[TCA_GATE_CYCLE_TIME])
- cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
+ need_cancel = ret != ACT_P_CREATED &&
+ gate_timer_needs_cancel(basetime, old_basetime,
+ tko, old_tk_offset,
+ timer_clockid, old_clockid);
- if (tb[TCA_GATE_ENTRY_LIST]) {
- err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
- if (err < 0)
- goto chain_put;
- }
+ if (need_cancel)
+ hrtimer_cancel(&gact->hitimer);
+
+ spin_lock_bh(&gact->tcf_lock);
if (!cycletime) {
struct tcfg_gate_entry *entry;
@@ -408,22 +505,20 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
list_for_each_entry(entry, &p->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
cycletime = cycle;
- if (!cycletime) {
- err = -EINVAL;
- goto chain_put;
- }
}
p->tcfg_cycletime = cycletime;
+ p->tcfg_cycletime_ext = cycletime_ext;
- if (tb[TCA_GATE_CYCLE_TIME_EXT])
- p->tcfg_cycletime_ext =
- nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
-
- gate_setup_timer(gact, basetime, tk_offset, clockid,
- ret == ACT_P_CREATED);
+ if (need_cancel || ret == ACT_P_CREATED)
+ gate_setup_timer(gact, timer_clockid, tko);
p->tcfg_priority = prio;
p->tcfg_flags = gflags;
- gate_get_start_time(gact, &start);
+ p->tcfg_basetime = basetime;
+ p->tcfg_clockid = timer_clockid;
+ gate_get_start_time(gact, p, &start);
+
+ old_p = rcu_replace_pointer(gact->param, p,
+ lockdep_is_held(&gact->tcf_lock));
gact->current_close_time = start;
gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
@@ -440,11 +535,15 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
+ if (old_p)
+ call_rcu(&old_p->rcu, tcf_gate_params_free_rcu);
+
return ret;
+err_free:
+ release_entry_list(&p->entries);
+ kfree(p);
chain_put:
- spin_unlock_bh(&gact->tcf_lock);
-
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
@@ -452,21 +551,29 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
* without taking tcf_lock.
*/
if (ret == ACT_P_CREATED)
- gate_setup_timer(gact, gact->param.tcfg_basetime,
- gact->tk_offset, gact->param.tcfg_clockid,
- true);
+ gate_setup_timer(gact, timer_clockid, tko);
+
tcf_idr_release(*a, bind);
return err;
}
+static void tcf_gate_params_free_rcu(struct rcu_head *head)
+{
+ struct tcf_gate_params *p = container_of(head, struct tcf_gate_params, rcu);
+
+ release_entry_list(&p->entries);
+ kfree(p);
+}
+
static void tcf_gate_cleanup(struct tc_action *a)
{
struct tcf_gate *gact = to_gate(a);
struct tcf_gate_params *p;
- p = &gact->param;
hrtimer_cancel(&gact->hitimer);
- release_entry_list(&p->entries);
+ p = rcu_dereference_protected(gact->param, 1);
+ if (p)
+ call_rcu(&p->rcu, tcf_gate_params_free_rcu);
}
static int dumping_entry(struct sk_buff *skb,
@@ -515,10 +622,9 @@ static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
struct nlattr *entry_list;
struct tcf_t t;
- spin_lock_bh(&gact->tcf_lock);
- opt.action = gact->tcf_action;
-
- p = &gact->param;
+ rcu_read_lock();
+ opt.action = READ_ONCE(gact->tcf_action);
+ p = rcu_dereference(gact->param);
if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
@@ -558,12 +664,12 @@ static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
tcf_tm_dump(&t, &gact->tcf_tm);
if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
goto nla_put_failure;
- spin_unlock_bh(&gact->tcf_lock);
+ rcu_read_unlock();
return skb->len;
nla_put_failure:
- spin_unlock_bh(&gact->tcf_lock);
+ rcu_read_unlock();
nlmsg_trim(skb, b);
return -1;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d27383c54b70..3e1dbb84bb83 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1297,33 +1297,6 @@ static void dev_deactivate_queue(struct net_device *dev,
}
}
-static void dev_reset_queue(struct net_device *dev,
- struct netdev_queue *dev_queue,
- void *_unused)
-{
- struct Qdisc *qdisc;
- bool nolock;
-
- qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
- if (!qdisc)
- return;
-
- nolock = qdisc->flags & TCQ_F_NOLOCK;
-
- if (nolock)
- spin_lock_bh(&qdisc->seqlock);
- spin_lock_bh(qdisc_lock(qdisc));
-
- qdisc_reset(qdisc);
-
- spin_unlock_bh(qdisc_lock(qdisc));
- if (nolock) {
- clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
- clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
- spin_unlock_bh(&qdisc->seqlock);
- }
-}
-
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index cc6051d4f2ef..c3e18bae8fbf 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -113,14 +113,15 @@ static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
- struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress);
+ struct bpf_mprog_entry *entry;
if (sch->parent != TC_H_INGRESS)
return;
tcf_block_put_ext(q->block, sch, &q->block_info);
- if (entry) {
+ if (mini_qdisc_pair_inited(&q->miniqp)) {
+ entry = rtnl_dereference(dev->tcx_ingress);
tcx_miniq_dec(entry);
if (!tcx_entry_is_active(entry)) {
tcx_entry_update(dev, NULL, true);
@@ -290,10 +291,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
static void clsact_destroy(struct Qdisc *sch)
{
+ struct bpf_mprog_entry *ingress_entry, *egress_entry;
struct clsact_sched_data *q = qdisc_priv(sch);
struct net_device *dev = qdisc_dev(sch);
- struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress);
- struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress);
if (sch->parent != TC_H_CLSACT)
return;
@@ -301,7 +301,8 @@ static void clsact_destroy(struct Qdisc *sch)
tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
- if (ingress_entry) {
+ if (mini_qdisc_pair_inited(&q->miniqp_ingress)) {
+ ingress_entry = rtnl_dereference(dev->tcx_ingress);
tcx_miniq_dec(ingress_entry);
if (!tcx_entry_is_active(ingress_entry)) {
tcx_entry_update(dev, NULL, true);
@@ -309,7 +310,8 @@ static void clsact_destroy(struct Qdisc *sch)
}
}
- if (egress_entry) {
+ if (mini_qdisc_pair_inited(&q->miniqp_egress)) {
+ egress_entry = rtnl_dereference(dev->tcx_egress);
tcx_miniq_dec(egress_entry);
if (!tcx_entry_is_active(egress_entry)) {
tcx_entry_update(dev, NULL, false);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 6e4bdaa876ed..ec4039a201a2 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -146,15 +146,12 @@ teql_destroy(struct Qdisc *sch)
master->slaves = NEXT_SLAVE(q);
if (q == master->slaves) {
struct netdev_queue *txq;
- spinlock_t *root_lock;
txq = netdev_get_tx_queue(master->dev, 0);
master->slaves = NULL;
- root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
- spin_lock_bh(root_lock);
- qdisc_reset(rtnl_dereference(txq->qdisc));
- spin_unlock_bh(root_lock);
+ dev_reset_queue(master->dev,
+ txq, NULL);
}
}
skb_queue_purge(&dat->q);
@@ -315,6 +312,7 @@ static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
+ skb->dev = slave;
if (!netif_xmit_frozen_or_stopped(slave_txq) &&
netdev_start_xmit(skb, slave, slave_txq, false) ==
NETDEV_TX_OK) {
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 02e08ac1da3a..23bb360ebd07 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -130,7 +130,14 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
struct smc_sock *smc;
struct sock *child;
- smc = smc_clcsock_user_data(sk);
+ rcu_read_lock();
+ smc = smc_clcsock_user_data_rcu(sk);
+ if (!smc || !refcount_inc_not_zero(&smc->sk.sk_refcnt)) {
+ rcu_read_unlock();
+ smc = NULL;
+ goto drop;
+ }
+ rcu_read_unlock();
if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
sk->sk_max_ack_backlog)
@@ -152,11 +159,14 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
}
+ sock_put(&smc->sk);
return child;
drop:
dst_release(dst);
tcp_listendrop(sk);
+ if (smc)
+ sock_put(&smc->sk);
return NULL;
}
@@ -253,7 +263,7 @@ static void smc_fback_restore_callbacks(struct smc_sock *smc)
struct sock *clcsk = smc->clcsock->sk;
write_lock_bh(&clcsk->sk_callback_lock);
- clcsk->sk_user_data = NULL;
+ rcu_assign_sk_user_data(clcsk, NULL);
smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
@@ -901,7 +911,7 @@ static void smc_fback_replace_callbacks(struct smc_sock *smc)
struct sock *clcsk = smc->clcsock->sk;
write_lock_bh(&clcsk->sk_callback_lock);
- clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+ __rcu_assign_sk_user_data_with_flags(clcsk, smc, SK_USER_DATA_NOCOPY);
smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
&smc->clcsk_state_change);
@@ -2663,8 +2673,8 @@ int smc_listen(struct socket *sock, int backlog)
* smc-specific sk_data_ready function
*/
write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
- smc->clcsock->sk->sk_user_data =
- (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+ __rcu_assign_sk_user_data_with_flags(smc->clcsock->sk, smc,
+ SK_USER_DATA_NOCOPY);
smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
smc_clcsock_data_ready, &smc->clcsk_data_ready);
write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
@@ -2685,10 +2695,11 @@ int smc_listen(struct socket *sock, int backlog)
write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
&smc->clcsk_data_ready);
- smc->clcsock->sk->sk_user_data = NULL;
+ rcu_assign_sk_user_data(smc->clcsock->sk, NULL);
write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
goto out;
}
+ sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 7579f9622e01..f9d364a2167a 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -346,6 +346,11 @@ static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
}
+static inline struct smc_sock *smc_clcsock_user_data_rcu(const struct sock *clcsk)
+{
+ return (struct smc_sock *)rcu_dereference_sk_user_data(clcsk);
+}
+
/* save target_cb in saved_cb, and replace target_cb with new_cb */
static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
void (*new_cb)(struct sock *),
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 10219f55aad1..bb0313ef5f7c 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -218,7 +218,7 @@ int smc_close_active(struct smc_sock *smc)
write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
&smc->clcsk_data_ready);
- smc->clcsock->sk->sk_user_data = NULL;
+ rcu_assign_sk_user_data(smc->clcsock->sk, NULL);
write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7fcb0574fc79..25aeb2596c67 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1049,14 +1049,25 @@ static int cache_release(struct inode *inode, struct file *filp,
struct cache_reader *rp = filp->private_data;
if (rp) {
+ struct cache_request *rq = NULL;
+
spin_lock(&queue_lock);
if (rp->offset) {
struct cache_queue *cq;
- for (cq= &rp->q; &cq->list != &cd->queue;
- cq = list_entry(cq->list.next, struct cache_queue, list))
+ for (cq = &rp->q; &cq->list != &cd->queue;
+ cq = list_entry(cq->list.next,
+ struct cache_queue, list))
if (!cq->reader) {
- container_of(cq, struct cache_request, q)
- ->readers--;
+ struct cache_request *cr =
+ container_of(cq,
+ struct cache_request, q);
+ cr->readers--;
+ if (cr->readers == 0 &&
+ !test_bit(CACHE_PENDING,
+ &cr->item->flags)) {
+ list_del(&cr->q.list);
+ rq = cr;
+ }
break;
}
rp->offset = 0;
@@ -1064,9 +1075,14 @@ static int cache_release(struct inode *inode, struct file *filp,
list_del(&rp->q.list);
spin_unlock(&queue_lock);
+ if (rq) {
+ cache_put(rq->item, cd);
+ kfree(rq->buf);
+ kfree(rq);
+ }
+
filp->private_data = NULL;
kfree(rp);
-
}
if (filp->f_mode & FMODE_WRITE) {
atomic_dec(&cd->writers);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 63262ef0c2e3..8abbd9c4045a 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1362,7 +1362,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
needed += RPCRDMA_MAX_RECV_BATCH;
if (atomic_inc_return(&ep->re_receiving) > 1)
- goto out;
+ goto out_dec;
/* fast path: all needed reps can be found on the free list */
wr = NULL;
@@ -1385,7 +1385,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
++count;
}
if (!wr)
- goto out;
+ goto out_dec;
rc = ib_post_recv(ep->re_id->qp, wr,
(const struct ib_recv_wr **)&bad_wr);
@@ -1400,9 +1400,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
--count;
}
}
+
+out_dec:
if (atomic_dec_return(&ep->re_receiving) > 0)
complete(&ep->re_done);
-
out:
trace_xprtrdma_post_recvs(r_xprt, count);
ep->re_receive_count += count;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 65dcbb54f55d..f6ae4332c070 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2233,6 +2233,8 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
if (skb_queue_empty(&sk->sk_write_queue))
break;
get_random_bytes(&delay, 2);
+ if (tsk->conn_timeout < 4)
+ tsk->conn_timeout = 4;
delay %= (tsk->conn_timeout / 4);
delay = msecs_to_jiffies(delay + 100);
sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index 0396fa19bdf1..d2b61b6ba58d 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -647,6 +647,7 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
}
spin_unlock_bh(&wdev->pmsr_lock);
+ cancel_work_sync(&wdev->pmsr_free_wk);
if (found)
cfg80211_pmsr_process_abort(wdev);
diff --git a/security/security.c b/security/security.c
index c5981e558bc2..6e4deac6ec07 100644
--- a/security/security.c
+++ b/security/security.c
@@ -79,6 +79,7 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = {
[LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
[LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
[LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection",
+ [LOCKDOWN_XEN_USER_ACTIONS] = "Xen guest user action",
[LOCKDOWN_INTEGRITY_MAX] = "integrity",
[LOCKDOWN_KCORE] = "/proc/kcore access",
[LOCKDOWN_KPROBES] = "use of kprobes",
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 155df14e8626..23708dc02401 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2144,6 +2144,10 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
for (;;) {
long tout;
struct snd_pcm_runtime *to_check;
+ unsigned int drain_rate;
+ snd_pcm_uframes_t drain_bufsz;
+ bool drain_no_period_wakeup;
+
if (signal_pending(current)) {
result = -ERESTARTSYS;
break;
@@ -2163,16 +2167,25 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
snd_pcm_group_unref(group, substream);
if (!to_check)
break; /* all drained */
+ /*
+ * Cache the runtime fields needed after unlock.
+ * A concurrent close() on the linked stream may free
+ * its runtime via snd_pcm_detach_substream() once we
+ * release the stream lock below.
+ */
+ drain_no_period_wakeup = to_check->no_period_wakeup;
+ drain_rate = to_check->rate;
+ drain_bufsz = to_check->buffer_size;
init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream);
- if (runtime->no_period_wakeup)
+ if (drain_no_period_wakeup)
tout = MAX_SCHEDULE_TIMEOUT;
else {
tout = 100;
- if (runtime->rate) {
- long t = runtime->buffer_size * 1100 / runtime->rate;
+ if (drain_rate) {
+ long t = drain_bufsz * 1100 / drain_rate;
tout = max(t, tout);
}
tout = msecs_to_jiffies(tout);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c13def0f1e1a..cb6ff3c36c5f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4164,6 +4164,24 @@ static int alc269_resume(struct hda_codec *codec)
return 0;
}
+#define STARLABS_STARFIGHTER_SHUTUP_DELAY_MS 30
+
+static void starlabs_starfighter_shutup(struct hda_codec *codec)
+{
+ if (snd_hda_gen_shutup_speakers(codec))
+ msleep(STARLABS_STARFIGHTER_SHUTUP_DELAY_MS);
+}
+
+static void alc233_fixup_starlabs_starfighter(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ spec->shutup = starlabs_starfighter_shutup;
+}
+
static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -8203,6 +8221,7 @@ enum {
ALC245_FIXUP_CLEVO_NOISY_MIC,
ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE,
ALC233_FIXUP_MEDION_MTL_SPK,
+ ALC233_FIXUP_STARLABS_STARFIGHTER,
ALC294_FIXUP_BASS_SPEAKER_15,
ALC283_FIXUP_DELL_HP_RESUME,
ALC294_FIXUP_ASUS_CS35L41_SPI_2,
@@ -10591,6 +10610,10 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
},
+ [ALC233_FIXUP_STARLABS_STARFIGHTER] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_fixup_starlabs_starfighter,
+ },
[ALC294_FIXUP_BASS_SPEAKER_15] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc294_fixup_bass_speaker_15,
@@ -11606,6 +11629,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x2782, 0x1705, "MEDION E15433", ALC269VC_FIXUP_INFINIX_Y4_MAX),
SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x2782, 0x4900, "MEDION E15443", ALC233_FIXUP_MEDION_MTL_SPK),
+ SND_PCI_QUIRK(0x7017, 0x2014, "Star Labs StarFighter", ALC233_FIXUP_STARLABS_STARFIGHTER),
SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
@@ -11702,6 +11726,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"},
{.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"},
{.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
+ {.id = ALC233_FIXUP_STARLABS_STARFIGHTER, .name = "starlabs-starfighter"},
{.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
{.id = ALC269_FIXUP_SONY_VAIO, .name = "vaio"},
{.id = ALC269_FIXUP_DELL_M101Z, .name = "dell-m101z"},
diff --git a/sound/soc/amd/acp3x-rt5682-max9836.c b/sound/soc/amd/acp3x-rt5682-max9836.c
index 357dfd016baf..6c4716565ded 100644
--- a/sound/soc/amd/acp3x-rt5682-max9836.c
+++ b/sound/soc/amd/acp3x-rt5682-max9836.c
@@ -94,8 +94,13 @@ static int acp3x_5682_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- rt5682_dai_wclk = clk_get(component->dev, "rt5682-dai-wclk");
- rt5682_dai_bclk = clk_get(component->dev, "rt5682-dai-bclk");
+ rt5682_dai_wclk = devm_clk_get(component->dev, "rt5682-dai-wclk");
+ if (IS_ERR(rt5682_dai_wclk))
+ return PTR_ERR(rt5682_dai_wclk);
+
+ rt5682_dai_bclk = devm_clk_get(component->dev, "rt5682-dai-bclk");
+ if (IS_ERR(rt5682_dai_bclk))
+ return PTR_ERR(rt5682_dai_bclk);
ret = snd_soc_card_jack_new_pins(card, "Headset Jack",
SND_JACK_HEADSET |
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index f33946fb895d..51a0de248497 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -696,6 +696,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vivobook_ASUSLaptop M6501RR_M6501RR"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK BM1503CDA"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "PM1503CDA"),
+ }
+ },
{}
};
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index aa0062f3aa91..3a61f222d85f 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -711,6 +711,7 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv)
switch (type & CS42L43_HSDET_TYPE_STS_MASK) {
case 0x0: // CTIA
case 0x1: // OMTP
+ case 0x4:
return cs42l43_run_load_detect(priv, true);
case 0x2: // 3-pole
return cs42l43_run_load_detect(priv, false);
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 47933afdb726..c9f92d445f4c 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -999,35 +999,31 @@ EXPORT_SYMBOL_GPL(graph_util_card_probe);
int graph_util_is_ports0(struct device_node *np)
{
- struct device_node *port, *ports, *ports0, *top;
- int ret;
+ struct device_node *parent __free(device_node) = of_get_parent(np);
+ struct device_node *port;
/* np is "endpoint" or "port" */
- if (of_node_name_eq(np, "endpoint")) {
- port = of_get_parent(np);
- } else {
+ if (of_node_name_eq(np, "endpoint"))
+ port = parent;
+ else
port = np;
- of_node_get(port);
- }
- ports = of_get_parent(port);
- top = of_get_parent(ports);
- ports0 = of_get_child_by_name(top, "ports");
+ struct device_node *ports __free(device_node) = of_get_parent(port);
+ const char *at = strchr(kbasename(ports->full_name), '@');
- ret = ports0 == ports;
-
- of_node_put(port);
- of_node_put(ports);
- of_node_put(ports0);
- of_node_put(top);
-
- return ret;
+ /*
+ * Since child iteration order may differ
+ * between a base DT and DT overlays,
+ * string match "ports" or "ports@0" in the node name instead.
+ */
+ return !at || !strcmp(at, "@0");
}
EXPORT_SYMBOL_GPL(graph_util_is_ports0);
static int graph_get_dai_id(struct device_node *ep)
{
- struct device_node *node;
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
+ struct device_node *port __free(device_node) = of_get_parent(ep);
struct device_node *endpoint;
struct of_endpoint info;
int i, id;
@@ -1050,13 +1046,10 @@ static int graph_get_dai_id(struct device_node *ep)
if (of_property_present(ep, "reg"))
return info.id;
- node = of_get_parent(ep);
- ret = of_property_present(node, "reg");
- of_node_put(node);
+ ret = of_property_present(port, "reg");
if (ret)
return info.port;
}
- node = of_graph_get_port_parent(ep);
/*
* Non HDMI sound case, counting port/endpoint on its DT
@@ -1070,8 +1063,6 @@ static int graph_get_dai_id(struct device_node *ep)
i++;
}
- of_node_put(node);
-
if (id < 0)
return -ENODEV;
@@ -1081,7 +1072,6 @@ static int graph_get_dai_id(struct device_node *ep)
int graph_util_parse_dai(struct device *dev, struct device_node *ep,
struct snd_soc_dai_link_component *dlc, int *is_single_link)
{
- struct device_node *node;
struct of_phandle_args args = {};
struct snd_soc_dai *dai;
int ret;
@@ -1089,7 +1079,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
if (!ep)
return 0;
- node = of_graph_get_port_parent(ep);
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
/*
* Try to find from DAI node
@@ -1131,10 +1121,8 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
* if he unbinded CPU or Codec.
*/
ret = snd_soc_get_dlc(&args, dlc);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
return ret;
- }
parse_dai_end:
if (is_single_link)
diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
index 01299bad456d..34472cebb66c 100644
--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
@@ -844,6 +844,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
.ack = q6apm_dai_ack,
.compress_ops = &q6apm_dai_compress_ops,
.use_dai_pcm_id = true,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
};
static int q6apm_dai_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
index b46aff1110e1..cba8548415ad 100644
--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
@@ -278,6 +278,7 @@ static const struct snd_soc_component_driver q6apm_lpass_dai_component = {
.of_xlate_dai_name = q6dsp_audio_ports_of_xlate_dai_name,
.be_pcm_base = AUDIOREACH_BE_PCM_BASE,
.use_dai_pcm_id = true,
+ .remove_order = SND_SOC_COMP_ORDER_FIRST,
};
static int q6apm_lpass_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c
index ca57413cb784..e6258e8dfa2b 100644
--- a/sound/soc/qcom/qdsp6/q6apm.c
+++ b/sound/soc/qcom/qdsp6/q6apm.c
@@ -732,6 +732,7 @@ static const struct snd_soc_component_driver q6apm_audio_component = {
.name = APM_AUDIO_DRV_NAME,
.probe = q6apm_audio_probe,
.remove = q6apm_audio_remove,
+ .remove_order = SND_SOC_COMP_ORDER_LAST,
};
static int apm_probe(gpr_device_t *gdev)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4ac870c2dafa..a1e382991426 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -456,8 +456,7 @@ static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
list_del(&rtd->list);
- if (delayed_work_pending(&rtd->delayed_work))
- flush_delayed_work(&rtd->delayed_work);
+ flush_delayed_work(&rtd->delayed_work);
snd_soc_pcm_component_free(rtd);
/*
@@ -1838,12 +1837,15 @@ static void cleanup_dmi_name(char *name)
/*
* Check if a DMI field is valid, i.e. not containing any string
- * in the black list.
+ * in the black list and not the empty string.
*/
static int is_dmi_valid(const char *field)
{
int i = 0;
+ if (!field[0])
+ return 0;
+
while (dmi_blacklist[i]) {
if (strstr(field, dmi_blacklist[i]))
return 0;
@@ -2115,6 +2117,9 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card)
for_each_card_rtds(card, rtd)
if (rtd->initialized)
snd_soc_link_exit(rtd);
+ /* flush delayed work before removing DAIs and DAPM widgets */
+ snd_soc_flush_all_delayed_work(card);
+
/* remove and free each DAI */
soc_remove_link_dais(card);
soc_remove_link_components(card);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 9d22613f71e2..2616a7efcc21 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -224,6 +224,7 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
packet = ctx->packet_size[idx];
if (packet) {
+ packet = min(packet, ep->maxframesize);
if (avail && packet >= avail)
return -EAGAIN;
return packet;
diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
index 124284010417..fe1d6e512699 100644
--- a/sound/usb/mixer_scarlett2.c
+++ b/sound/usb/mixer_scarlett2.c
@@ -8579,6 +8579,8 @@ static int scarlett2_find_fc_interface(struct usb_device *dev,
if (desc->bInterfaceClass != 255)
continue;
+ if (desc->bNumEndpoints < 1)
+ continue;
epd = get_endpoint(intf->altsetting, 0);
private->bInterfaceNumber = desc->bInterfaceNumber;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 41752b819746..5c3a97ea46e0 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2351,6 +2351,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x2040, 0x7281, /* Hauppauge HVR-950Q-MXL */
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
+ DEVICE_FLG(0x20b1, 0x2009, /* XMOS Ltd DIYINHK USB Audio 2.0 */
+ QUIRK_FLAG_SKIP_IMPLICIT_FB | QUIRK_FLAG_DSD_RAW),
DEVICE_FLG(0x2040, 0x8200, /* Hauppauge Woodbury */
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 8a48cc2536f5..32cf48f2da9a 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -157,8 +157,11 @@ static int load_xbc_file(const char *path, char **buf)
if (fd < 0)
return -errno;
ret = fstat(fd, &stat);
- if (ret < 0)
- return -errno;
+ if (ret < 0) {
+ ret = -errno;
+ close(fd);
+ return ret;
+ }
ret = load_xbc_fd(fd, buf, stat.st_size);
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 02d1fccd495f..4f6f3121d8ec 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -91,10 +91,12 @@ $(LIBSUBCMD)-clean:
$(Q)$(RM) -r -- $(LIBSUBCMD_OUTPUT)
clean: $(LIBSUBCMD)-clean
- $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
- $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+ $(Q)find $(OUTPUT) \( -name '*.o' -o -name '\.*.cmd' -o -name '\.*.d' \) -type f -print | xargs $(RM)
$(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
+mrproper: clean
+ $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
+
FORCE:
-.PHONY: clean FORCE
+.PHONY: clean mrproper FORCE
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index a56cf8b0a7d4..09c484182d5b 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -18,6 +18,7 @@
#include <poll.h>
#include <ctype.h>
#include <linux/capability.h>
+#include <linux/err.h>
#include <linux/string.h>
#include "debug.h"
@@ -998,8 +999,12 @@ static int prepare_func_profile(struct perf_ftrace *ftrace)
ftrace->graph_tail = 1;
ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL);
- if (ftrace->profile_hash == NULL)
- return -ENOMEM;
+ if (IS_ERR(ftrace->profile_hash)) {
+ int err = PTR_ERR(ftrace->profile_hash);
+
+ ftrace->profile_hash = NULL;
+ return err;
+ }
return 0;
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index cb8f191e19fd..890cc0a69fa5 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -44,6 +44,7 @@
#include "strbuf.h"
#include <regex.h>
#include <linux/bitops.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
@@ -135,8 +136,10 @@ static int annotated_source__alloc_histograms(struct annotated_source *src,
return -1;
src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
- if (src->samples == NULL)
+ if (IS_ERR(src->samples)) {
zfree(&src->histograms);
+ src->samples = NULL;
+ }
return src->histograms ? 0 : -1;
}
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index 2dc93199ac25..8a6f450c6f8e 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -408,7 +408,7 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
start = map__unmap_ip(map, sym->start);
end = map__unmap_ip(map, sym->end);
- ops->target.outside = target.addr < start || target.addr > end;
+ ops->target.outside = target.addr < start || target.addr >= end;
/*
* FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
index e5db897586bb..7a855699ff24 100644
--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
@@ -6,8 +6,10 @@
#define __HID_BPF_HELPERS_H
/* "undefine" structs and enums in vmlinux.h, because we "override" them below */
+#define bpf_wq bpf_wq___not_used
#define hid_bpf_ctx hid_bpf_ctx___not_used
#define hid_bpf_ops hid_bpf_ops___not_used
+#define hid_device hid_device___not_used
#define hid_report_type hid_report_type___not_used
#define hid_class_request hid_class_request___not_used
#define hid_bpf_attach_flags hid_bpf_attach_flags___not_used
@@ -24,8 +26,10 @@
#include "vmlinux.h"
+#undef bpf_wq
#undef hid_bpf_ctx
#undef hid_bpf_ops
+#undef hid_device
#undef hid_report_type
#undef hid_class_request
#undef hid_bpf_attach_flags
@@ -52,6 +56,14 @@ enum hid_report_type {
HID_REPORT_TYPES,
};
+struct hid_device {
+ unsigned int id;
+} __attribute__((preserve_access_index));
+
+struct bpf_wq {
+ __u64 __opaque[2];
+};
+
struct hid_bpf_ctx {
struct hid_device *hid;
__u32 allocated_size;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 91271cfb950f..f2c71361bd78 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -91,6 +91,24 @@ CBPF_MPTCP_SUBOPTION_ADD_ADDR="14,
6 0 0 65535,
6 0 0 0"
+# IPv4: TCP hdr of 48B, a first suboption of 12B (DACK8), the RM_ADDR suboption
+# generated using "nfbpf_compile '(ip[32] & 0xf0) == 0xc0 && ip[53] == 0x0c &&
+# (ip[66] & 0xf0) == 0x40'"
+CBPF_MPTCP_SUBOPTION_RM_ADDR="13,
+ 48 0 0 0,
+ 84 0 0 240,
+ 21 0 9 64,
+ 48 0 0 32,
+ 84 0 0 240,
+ 21 0 6 192,
+ 48 0 0 53,
+ 21 0 4 12,
+ 48 0 0 66,
+ 84 0 0 240,
+ 21 0 1 64,
+ 6 0 0 65535,
+ 6 0 0 0"
+
init_partial()
{
capout=$(mktemp)
@@ -3867,6 +3885,14 @@ endpoint_tests()
chk_subflow_nr "after no reject" 3
chk_mptcp_info subflows 2 subflows 2
+ # To make sure RM_ADDR are sent over a different subflow, but
+ # allow the rest to quickly and cleanly close the subflow
+ local ipt=1
+ ip netns exec "${ns2}" ${iptables} -I OUTPUT -s "10.0.1.2" \
+ -p tcp -m tcp --tcp-option 30 \
+ -m bpf --bytecode \
+ "$CBPF_MPTCP_SUBOPTION_RM_ADDR" \
+ -j DROP || ipt=0
local i
for i in $(seq 3); do
pm_nl_del_endpoint $ns2 1 10.0.1.2
@@ -3879,6 +3905,7 @@ endpoint_tests()
chk_subflow_nr "after re-add id 0 ($i)" 3
chk_mptcp_info subflows 3 subflows 3
done
+ [ ${ipt} = 1 ] && ip netns exec "${ns2}" ${iptables} -D OUTPUT 1
mptcp_lib_kill_group_wait $tests_pid
@@ -3922,38 +3949,54 @@ endpoint_tests()
$ns1 10.0.2.1 id 1 flags signal
chk_subflow_nr "before delete" 2
chk_mptcp_info subflows 1 subflows 1
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 1
pm_nl_del_endpoint $ns1 1 10.0.2.1
pm_nl_del_endpoint $ns1 2 224.0.0.1
sleep 0.5
chk_subflow_nr "after delete" 1
chk_mptcp_info subflows 0 subflows 0
+ chk_mptcp_info add_addr_signal 0 add_addr_accepted 0
pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 id 2 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-add" 3
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
+ # To make sure RM_ADDR are sent over a different subflow, but
+ # allow the rest to quickly and cleanly close the subflow
+ local ipt=1
+ ip netns exec "${ns1}" ${iptables} -I OUTPUT -s "10.0.1.1" \
+ -p tcp -m tcp --tcp-option 30 \
+ -m bpf --bytecode \
+ "$CBPF_MPTCP_SUBOPTION_RM_ADDR" \
+ -j DROP || ipt=0
pm_nl_del_endpoint $ns1 42 10.0.1.1
sleep 0.5
chk_subflow_nr "after delete ID 0" 2
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
+ [ ${ipt} = 1 ] && ip netns exec "${ns1}" ${iptables} -D OUTPUT 1
pm_nl_add_endpoint $ns1 10.0.1.1 id 99 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-add ID 0" 3
chk_mptcp_info subflows 3 subflows 3
+ chk_mptcp_info add_addr_signal 3 add_addr_accepted 2
pm_nl_del_endpoint $ns1 99 10.0.1.1
sleep 0.5
chk_subflow_nr "after re-delete ID 0" 2
chk_mptcp_info subflows 2 subflows 2
+ chk_mptcp_info add_addr_signal 2 add_addr_accepted 2
pm_nl_add_endpoint $ns1 10.0.1.1 id 88 flags signal
wait_mpj $ns2
chk_subflow_nr "after re-re-add ID 0" 3
chk_mptcp_info subflows 3 subflows 3
+ chk_mptcp_info add_addr_signal 3 add_addr_accepted 2
mptcp_lib_kill_group_wait $tests_pid
kill_events_pids
prev parent reply other threads:[~2026-03-25 10:16 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-25 10:15 Linux 6.12.78 Greg Kroah-Hartman
2026-03-25 10:15 ` Greg Kroah-Hartman [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=2026032555-propeller-snide-52f6@gregkh \
--to=gregkh@linuxfoundation.org \
--cc=akpm@linux-foundation.org \
--cc=jslaby@suse.cz \
--cc=linux-kernel@vger.kernel.org \
--cc=lwn@lwn.net \
--cc=stable@vger.kernel.org \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox