diff --git a/Documentation/conf.py b/Documentation/conf.py
index dfc19c915d5c4b6ec33739e3739be87bd330da5e..e385e24fe9e72eca447123a3543ad0d0fe972465 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -345,9 +345,9 @@ sys.stderr.write("Using %s theme\n" % html_theme)
 html_static_path = ['sphinx-static']
 
 # If true, Docutils "smart quotes" will be used to convert quotes and dashes
-# to typographically correct entities. This will convert "--" to "—",
-# which is not always what we want, so disable it.
-smartquotes = False
+# to typographically correct entities. However, conversion of "--" to "—"
+# is not always what we want, so enable only quotes.
+smartquotes_action = 'q'
 
 # Custom sidebar templates, maps document names to template names.
 # Note that the RTD theme ignores this
diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
index 0ffeece1e0c8e94c3b25185b1de5ccc15350db52..6332e8395263b04935758d42a17adfe690fcd042 100644
--- a/Documentation/userspace-api/media/mediactl/media-types.rst
+++ b/Documentation/userspace-api/media/mediactl/media-types.rst
@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements
 	  are origins of links.
 
     *  -  ``MEDIA_PAD_FL_MUST_CONNECT``
-       -  If this flag is set and the pad is linked to any other pad, then
-	  at least one of those links must be enabled for the entity to be
-	  able to stream. There could be temporary reasons (e.g. device
-	  configuration dependent) for the pad to need enabled links even
-	  when this flag isn't set; the absence of the flag doesn't imply
-	  there is none.
+       -  If this flag is set, then for this pad to be able to stream, it must
+	  be connected by at least one enabled link. There could be temporary
+	  reasons (e.g. device configuration dependent) for the pad to need
+	  enabled links even when this flag isn't set; the absence of the flag
+	  doesn't imply there is none.
 
 One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``
diff --git a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
index 04f1ae1382e7a357ed51bd455154abf3f1a6de61..bc64348b8218519620bf22acbef2fd2f038ae608 100644
--- a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
@@ -28,7 +28,7 @@ &uart3 {
 &twsi1 {
 	status = "okay";
 	pmic: max8925@3c {
-		compatible = "maxium,max8925";
+		compatible = "maxim,max8925";
 		reg = <0x3c>;
 		interrupts = <1>;
 		interrupt-parent = <&intcmux4>;
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 8e330d2e2e224e913b6cbf03520e2f46c72b3317..b75de7caaa7e51d3d8601a0765886dc05d06eb78 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -2098,8 +2098,16 @@ pcie1: pci@1c08000 {
 			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
 
-			interrupts = ;
-			interrupt-names = "msi";
+			interrupts = ,
+				     ,
+				     ,
+				     ,
+				     ,
+				     ,
+				     ,
+				     ;
+			interrupt-names = "msi0", "msi1", "msi2", "msi3",
+					  "msi4", "msi5", "msi6", "msi7";
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0x7>;
 			interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
index f29cce5186acd5433581b969b107e4ad47f81458..c4bfe43471f7ca5e4d0e7debdf20475c3f37b3b7 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
@@ -743,7 +743,7 @@ &swr2 {
 	wcd_tx: codec@0,3 {
 		compatible = "sdw20217010d00";
 		reg = <0 3>;
-		qcom,tx-port-mapping = <1 1 2 3>;
+		qcom,tx-port-mapping = <2 2 3 4>;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index 2c09ce8aeafd9b3f3b44644521430e31a463b4bc..7a70cc594279795793783699d5a70145891a667e 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -835,7 +835,7 @@ &swr2 {
 	wcd_tx: codec@0,3 {
 		compatible = "sdw20217010d00";
 		reg = <0 3>;
-		qcom,tx-port-mapping = <1 1 2 3>;
+		qcom,tx-port-mapping = <2 2 3 4>;
 	};
 };
 
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 5937d5edaba1eac5a0c4e4b055c3e77fcbe3bf62..000a28e1c5e8d43aba8ed5185e86c654cf5ad5a0 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -97,26 +97,28 @@
  * version takes two arguments: a src and destination register.
  * However, the source and destination registers can not be
  * the same register.
+ *
+ * We use add,l to avoid clobbering the C/B bits in the PSW.
 */
 	.macro	tophys	grvirt, grphys
-	ldil	L%(__PAGE_OFFSET), \grphys
-	sub	\grvirt, \grphys, \grphys
+	ldil	L%(-__PAGE_OFFSET), \grphys
+	addl	\grvirt, \grphys, \grphys
 	.endm
-	
+
 	.macro	tovirt	grphys, grvirt
 	ldil	L%(__PAGE_OFFSET), \grvirt
-	add	\grphys, \grvirt, \grvirt
+	addl	\grphys, \grvirt, \grvirt
 	.endm
 
 	.macro	tophys_r1  gr
-	ldil	L%(__PAGE_OFFSET), %r1
-	sub	\gr, %r1, \gr
+	ldil	L%(-__PAGE_OFFSET), %r1
+	addl	\gr, %r1, \gr
 	.endm
-	
+
 	.macro	tovirt_r1  gr
 	ldil	L%(__PAGE_OFFSET), %r1
-	add	\gr, %r1, \gr
+	addl	\gr, %r1, \gr
 	.endm
 
 	.macro delay value
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index 3c43baca7b397ddd4b4b09c671066e9b9cca81e1..2aceebcd695c8057d14e8cf58f45e230a9f16d38 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 "	addc	%0, %5, %0\n"
 "	addc	%0, %3, %0\n"
 "1:	ldws,ma	4(%1), %3\n"
-"	addib,<	0, %2, 1b\n"
+"	addib,>	-1, %2, 1b\n"
 "	addc	%0, %3, %0\n"
 "\n"
 "	extru	%0, 31, 16, %4\n"
@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 	**	Try to keep 4 registers with "live" values ahead of the ALU.
 	*/
 
+"	depdi	0, 31, 32, %0\n"/* clear upper half of incoming checksum */
 "	ldd,ma	8(%1), %4\n"	/* get 1st saddr word */
 "	ldd,ma	8(%2), %5\n"	/* get 1st daddr word */
 "	add	%4, %0, %0\n"
@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 "	add,dc	%3, %0, %0\n"  /* fold in proto+len | carry bit */
 "	extrd,u	%0, 31, 32, %4\n"/* copy upper half down */
 "	depdi	0, 31, 32, %0\n"/* clear upper half */
-"	add	%4, %0, %0\n"	/* fold into 32-bits */
-"	addc	0, %0, %0\n"	/* add carry */
+"	add,dc	%4, %0, %0\n"	/* fold into 32-bits, plus carry */
+"	addc	0, %0, %0\n"	/* add final carry */
 
 #else
 
@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 "	ldw,ma	4(%2), %7\n"	/* 4th daddr */
 "	addc	%6, %0, %0\n"
 "	addc	%7, %0, %0\n"
-"	addc	%3, %0, %0\n"	/* fold in proto+len, catch carry */
+"	addc	%3, %0, %0\n"	/* fold in proto+len */
+"	addc	0, %0, %0\n"	/* add carry */
 
 #endif
 
 	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index c520e551a165258609cba5e068037493bd7e57a8..a8e75e5b884a701ad9c84f828a4931821d1444e3 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -169,6 +169,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
 static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
 {
 	unsigned long saddr = regs->ior;
+	unsigned long shift, temp1;
 	__u64 val = 0;
 	ASM_EXCEPTIONTABLE_VAR(ret);
 
@@ -180,25 +181,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
 
 #ifdef CONFIG_64BIT
 	__asm__ __volatile__  (
-"	depd,z	%3,60,3,%%r19\n"	/* r19=(ofs&7)*8 */
-"	mtsp	%4, %%sr1\n"
-"	depd	%%r0,63,3,%3\n"
-"1:	ldd	0(%%sr1,%3),%0\n"
-"2:	ldd	8(%%sr1,%3),%%r20\n"
-"	subi	64,%%r19,%%r19\n"
-"	mtsar	%%r19\n"
-"	shrpd	%0,%%r20,%%sar,%0\n"
+"	depd,z	%2,60,3,%3\n"		/* shift=(ofs&7)*8 */
+"	mtsp	%5, %%sr1\n"
+"	depd	%%r0,63,3,%2\n"
+"1:	ldd	0(%%sr1,%2),%0\n"
+"2:	ldd	8(%%sr1,%2),%4\n"
+"	subi	64,%3,%3\n"
+"	mtsar	%3\n"
+"	shrpd	%0,%4,%%sar,%0\n"
 "3:	\n"
 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
-	: "=r" (val), "+r" (ret)
-	: "0" (val), "r" (saddr), "r" (regs->isr)
-	: "r19", "r20" );
+	: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift),
"=&r" (temp1) + : "r" (regs->isr) ); #else - { - unsigned long shift, temp1; __asm__ __volatile__ ( -" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */ +" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */ " mtsp %5, %%sr1\n" " dep %%r0,31,2,%2\n" "1: ldw 0(%%sr1,%2),%0\n" @@ -214,7 +212,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1") : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) : "r" (regs->isr) ); - } #endif DPRINTF("val = 0x%llx\n", val); diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h index a21f529c43d96b548690aeff283820ce07a6e4a1..8359c06d92d9f8a486c027aad3b76dea1234a226 100644 --- a/arch/powerpc/include/asm/reg_fsl_emb.h +++ b/arch/powerpc/include/asm/reg_fsl_emb.h @@ -12,9 +12,16 @@ #ifndef __ASSEMBLY__ /* Performance Monitor Registers */ #define mfpmr(rn) ({unsigned int rval; \ - asm volatile("mfpmr %0," __stringify(rn) \ + asm volatile(".machine push; " \ + ".machine e300; " \ + "mfpmr %0," __stringify(rn) ";" \ + ".machine pop; " \ : "=r" (rval)); rval;}) -#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v)) +#define mtpmr(rn, v) asm volatile(".machine push; " \ + ".machine e300; " \ + "mtpmr " __stringify(rn) ",%0; " \ + ".machine pop; " \ + : : "r" (v)) #endif /* __ASSEMBLY__ */ /* Freescale Book E Performance Monitor APU Registers */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 0b5878c3125b1cd67ff3f44cedf83d514478c262..77364729a1b617c6040cafa6cae59e66023b19cc 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -375,6 +375,18 @@ static int __init early_init_dt_scan_cpus(unsigned long node, if (IS_ENABLED(CONFIG_PPC64)) boot_cpu_hwid = be32_to_cpu(intserv[found_thread]); + if (nr_cpu_ids % nthreads != 0) { + set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads)); + pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n", + nr_cpu_ids); + } + + if (boot_cpuid >= nr_cpu_ids) { + set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads))); + pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n", + boot_cpuid, nr_cpu_ids); + } + /* * PAPR defines "logical" PVR values for cpus that * meet various levels of the architecture: diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 6eac63e79a89957ae98337f407b33db3f7beff93..0ab65eeb93ee3a73346a2d9e7b4e5617f6b40ed3 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -76,7 +76,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o -CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec) +CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec) # Enable CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index 0a7ffcfd59cda0ade67bd7c640b1f69edc758e30..e2eed8f97665fb5029df9cc6969a3f44502de77f 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -1,256 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* parport.h: sparc64 specific parport initialization and dma. - * - * Copyright (C) 1999 Eddie C. 
Dost (ecd@skynet.be) - */ +#ifndef ___ASM_SPARC_PARPORT_H +#define ___ASM_SPARC_PARPORT_H -#ifndef _ASM_SPARC64_PARPORT_H -#define _ASM_SPARC64_PARPORT_H 1 - -#include -#include - -#include -#include -#include - -#define PARPORT_PC_MAX_PORTS PARPORT_MAX - -/* - * While sparc64 doesn't have an ISA DMA API, we provide something that looks - * close enough to make parport_pc happy - */ -#define HAS_DMA - -#ifdef CONFIG_PARPORT_PC_FIFO -static DEFINE_SPINLOCK(dma_spin_lock); - -#define claim_dma_lock() \ -({ unsigned long flags; \ - spin_lock_irqsave(&dma_spin_lock, flags); \ - flags; \ -}) - -#define release_dma_lock(__flags) \ - spin_unlock_irqrestore(&dma_spin_lock, __flags); +#if defined(__sparc__) && defined(__arch64__) +#include +#else +#include +#endif #endif -static struct sparc_ebus_info { - struct ebus_dma_info info; - unsigned int addr; - unsigned int count; - int lock; - - struct parport *port; -} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS]; - -static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); - -static inline int request_dma(unsigned int dmanr, const char *device_id) -{ - if (dmanr >= PARPORT_PC_MAX_PORTS) - return -EINVAL; - if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0) - return -EBUSY; - return 0; -} - -static inline void free_dma(unsigned int dmanr) -{ - if (dmanr >= PARPORT_PC_MAX_PORTS) { - printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); - return; - } - if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) { - printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); - return; - } -} - -static inline void enable_dma(unsigned int dmanr) -{ - ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); - - if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info, - sparc_ebus_dmas[dmanr].addr, - sparc_ebus_dmas[dmanr].count)) - BUG(); -} - -static inline void disable_dma(unsigned int dmanr) -{ - ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); -} - -static inline void clear_dma_ff(unsigned int dmanr) -{ - /* nothing */ -} - -static inline void set_dma_mode(unsigned int dmanr, char mode) -{ - ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); -} - -static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) -{ - sparc_ebus_dmas[dmanr].addr = addr; -} - -static inline void set_dma_count(unsigned int dmanr, unsigned int count) -{ - sparc_ebus_dmas[dmanr].count = count; -} - -static inline unsigned int get_dma_residue(unsigned int dmanr) -{ - return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); -} - -static int ecpp_probe(struct platform_device *op) -{ - unsigned long base = op->resource[0].start; - unsigned long config = op->resource[1].start; - unsigned long d_base = op->resource[2].start; - unsigned long d_len; - struct device_node *parent; - struct parport *p; - int slot, err; - - parent = op->dev.of_node->parent; - if (of_node_name_eq(parent, "dma")) { - p = parport_pc_probe_port(base, base + 0x400, - op->archdata.irqs[0], PARPORT_DMA_NOFIFO, - op->dev.parent->parent, 0); - if (!p) - return -ENOMEM; - dev_set_drvdata(&op->dev, p); - return 0; - } - - for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) { - if (!test_and_set_bit(slot, dma_slot_map)) - break; - } - err = -ENODEV; - if (slot >= PARPORT_PC_MAX_PORTS) - goto out_err; - - spin_lock_init(&sparc_ebus_dmas[slot].info.lock); - - d_len = (op->resource[2].end - d_base) + 1UL; - sparc_ebus_dmas[slot].info.regs = - of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA"); - - if (!sparc_ebus_dmas[slot].info.regs) - goto out_clear_map; - - sparc_ebus_dmas[slot].info.flags = 0; - 
sparc_ebus_dmas[slot].info.callback = NULL; - sparc_ebus_dmas[slot].info.client_cookie = NULL; - sparc_ebus_dmas[slot].info.irq = 0xdeadbeef; - strcpy(sparc_ebus_dmas[slot].info.name, "parport"); - if (ebus_dma_register(&sparc_ebus_dmas[slot].info)) - goto out_unmap_regs; - - ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1); - - /* Configure IRQ to Push Pull, Level Low */ - /* Enable ECP, set bit 2 of the CTR first */ - outb(0x04, base + 0x02); - ns87303_modify(config, PCR, - PCR_EPP_ENABLE | - PCR_IRQ_ODRAIN, - PCR_ECP_ENABLE | - PCR_ECP_CLK_ENA | - PCR_IRQ_POLAR); - - /* CTR bit 5 controls direction of port */ - ns87303_modify(config, PTR, - 0, PTR_LPT_REG_DIR); - - p = parport_pc_probe_port(base, base + 0x400, - op->archdata.irqs[0], - slot, - op->dev.parent, - 0); - err = -ENOMEM; - if (!p) - goto out_disable_irq; - - dev_set_drvdata(&op->dev, p); - - return 0; - -out_disable_irq: - ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); - ebus_dma_unregister(&sparc_ebus_dmas[slot].info); - -out_unmap_regs: - of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len); - -out_clear_map: - clear_bit(slot, dma_slot_map); - -out_err: - return err; -} - -static int ecpp_remove(struct platform_device *op) -{ - struct parport *p = dev_get_drvdata(&op->dev); - int slot = p->dma; - - parport_pc_unregister_port(p); - - if (slot != PARPORT_DMA_NOFIFO) { - unsigned long d_base = op->resource[2].start; - unsigned long d_len; - - d_len = (op->resource[2].end - d_base) + 1UL; - - ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); - ebus_dma_unregister(&sparc_ebus_dmas[slot].info); - of_iounmap(&op->resource[2], - sparc_ebus_dmas[slot].info.regs, - d_len); - clear_bit(slot, dma_slot_map); - } - - return 0; -} - -static const struct of_device_id ecpp_match[] = { - { - .name = "ecpp", - }, - { - .name = "parallel", - .compatible = "ecpp", - }, - { - .name = "parallel", - .compatible = "ns87317-ecpp", - }, - { - .name = "parallel", - .compatible = "pnpALI,1533,3", - }, - {}, -}; - -static struct platform_driver ecpp_driver = { - .driver = { - .name = "ecpp", - .of_match_table = ecpp_match, - }, - .probe = ecpp_probe, - .remove = ecpp_remove, -}; - -static int parport_pc_find_nonpci_ports(int autoirq, int autodma) -{ - return platform_driver_register(&ecpp_driver); -} - -#endif /* !(_ASM_SPARC64_PARPORT_H */ diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h new file mode 100644 index 0000000000000000000000000000000000000000..0a7ffcfd59cda0ade67bd7c640b1f69edc758e30 --- /dev/null +++ b/arch/sparc/include/asm/parport_64.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* parport.h: sparc64 specific parport initialization and dma. + * + * Copyright (C) 1999 Eddie C. 
Dost (ecd@skynet.be) + */ + +#ifndef _ASM_SPARC64_PARPORT_H +#define _ASM_SPARC64_PARPORT_H 1 + +#include +#include + +#include +#include +#include + +#define PARPORT_PC_MAX_PORTS PARPORT_MAX + +/* + * While sparc64 doesn't have an ISA DMA API, we provide something that looks + * close enough to make parport_pc happy + */ +#define HAS_DMA + +#ifdef CONFIG_PARPORT_PC_FIFO +static DEFINE_SPINLOCK(dma_spin_lock); + +#define claim_dma_lock() \ +({ unsigned long flags; \ + spin_lock_irqsave(&dma_spin_lock, flags); \ + flags; \ +}) + +#define release_dma_lock(__flags) \ + spin_unlock_irqrestore(&dma_spin_lock, __flags); +#endif + +static struct sparc_ebus_info { + struct ebus_dma_info info; + unsigned int addr; + unsigned int count; + int lock; + + struct parport *port; +} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS]; + +static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); + +static inline int request_dma(unsigned int dmanr, const char *device_id) +{ + if (dmanr >= PARPORT_PC_MAX_PORTS) + return -EINVAL; + if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0) + return -EBUSY; + return 0; +} + +static inline void free_dma(unsigned int dmanr) +{ + if (dmanr >= PARPORT_PC_MAX_PORTS) { + printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); + return; + } + if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) { + printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); + return; + } +} + +static inline void enable_dma(unsigned int dmanr) +{ + ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); + + if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info, + sparc_ebus_dmas[dmanr].addr, + sparc_ebus_dmas[dmanr].count)) + BUG(); +} + +static inline void disable_dma(unsigned int dmanr) +{ + ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); +} + +static inline void clear_dma_ff(unsigned int dmanr) +{ + /* nothing */ +} + +static inline void set_dma_mode(unsigned int dmanr, char mode) +{ + ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); +} + +static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) +{ + sparc_ebus_dmas[dmanr].addr = addr; +} + +static inline void set_dma_count(unsigned int dmanr, unsigned int count) +{ + sparc_ebus_dmas[dmanr].count = count; +} + +static inline unsigned int get_dma_residue(unsigned int dmanr) +{ + return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); +} + +static int ecpp_probe(struct platform_device *op) +{ + unsigned long base = op->resource[0].start; + unsigned long config = op->resource[1].start; + unsigned long d_base = op->resource[2].start; + unsigned long d_len; + struct device_node *parent; + struct parport *p; + int slot, err; + + parent = op->dev.of_node->parent; + if (of_node_name_eq(parent, "dma")) { + p = parport_pc_probe_port(base, base + 0x400, + op->archdata.irqs[0], PARPORT_DMA_NOFIFO, + op->dev.parent->parent, 0); + if (!p) + return -ENOMEM; + dev_set_drvdata(&op->dev, p); + return 0; + } + + for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) { + if (!test_and_set_bit(slot, dma_slot_map)) + break; + } + err = -ENODEV; + if (slot >= PARPORT_PC_MAX_PORTS) + goto out_err; + + spin_lock_init(&sparc_ebus_dmas[slot].info.lock); + + d_len = (op->resource[2].end - d_base) + 1UL; + sparc_ebus_dmas[slot].info.regs = + of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA"); + + if (!sparc_ebus_dmas[slot].info.regs) + goto out_clear_map; + + sparc_ebus_dmas[slot].info.flags = 0; + sparc_ebus_dmas[slot].info.callback = NULL; + sparc_ebus_dmas[slot].info.client_cookie = NULL; + sparc_ebus_dmas[slot].info.irq = 0xdeadbeef; + 
strcpy(sparc_ebus_dmas[slot].info.name, "parport"); + if (ebus_dma_register(&sparc_ebus_dmas[slot].info)) + goto out_unmap_regs; + + ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1); + + /* Configure IRQ to Push Pull, Level Low */ + /* Enable ECP, set bit 2 of the CTR first */ + outb(0x04, base + 0x02); + ns87303_modify(config, PCR, + PCR_EPP_ENABLE | + PCR_IRQ_ODRAIN, + PCR_ECP_ENABLE | + PCR_ECP_CLK_ENA | + PCR_IRQ_POLAR); + + /* CTR bit 5 controls direction of port */ + ns87303_modify(config, PTR, + 0, PTR_LPT_REG_DIR); + + p = parport_pc_probe_port(base, base + 0x400, + op->archdata.irqs[0], + slot, + op->dev.parent, + 0); + err = -ENOMEM; + if (!p) + goto out_disable_irq; + + dev_set_drvdata(&op->dev, p); + + return 0; + +out_disable_irq: + ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); + ebus_dma_unregister(&sparc_ebus_dmas[slot].info); + +out_unmap_regs: + of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len); + +out_clear_map: + clear_bit(slot, dma_slot_map); + +out_err: + return err; +} + +static int ecpp_remove(struct platform_device *op) +{ + struct parport *p = dev_get_drvdata(&op->dev); + int slot = p->dma; + + parport_pc_unregister_port(p); + + if (slot != PARPORT_DMA_NOFIFO) { + unsigned long d_base = op->resource[2].start; + unsigned long d_len; + + d_len = (op->resource[2].end - d_base) + 1UL; + + ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); + ebus_dma_unregister(&sparc_ebus_dmas[slot].info); + of_iounmap(&op->resource[2], + sparc_ebus_dmas[slot].info.regs, + d_len); + clear_bit(slot, dma_slot_map); + } + + return 0; +} + +static const struct of_device_id ecpp_match[] = { + { + .name = "ecpp", + }, + { + .name = "parallel", + .compatible = "ecpp", + }, + { + .name = "parallel", + .compatible = "ns87317-ecpp", + }, + { + .name = "parallel", + .compatible = "pnpALI,1533,3", + }, + {}, +}; + +static struct platform_driver ecpp_driver = { + .driver = { + .name = "ecpp", + .of_match_table = ecpp_match, + }, + .probe = ecpp_probe, + .remove = ecpp_remove, +}; + +static int parport_pc_find_nonpci_ports(int autoirq, int autodma) +{ + return platform_driver_register(&ecpp_driver); +} + +#endif /* !(_ASM_SPARC64_PARPORT_H */ diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 17cdfdbf1f3b78928e4bc8cea63a1d0ed2898df9..149adc09475306d5eb2f891f66860c18ed3f7608 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -279,7 +279,7 @@ static int __init setup_nmi_watchdog(char *str) if (!strncmp(str, "panic", 5)) panic_on_timeout = 1; - return 0; + return 1; } __setup("nmi_watchdog=", setup_nmi_watchdog); diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c index 136c78f28f8ba2c99a9cf066c9cfebf3a85aa77b..1bbf4335de4540598528240e6106896b8d6d5c4a 100644 --- a/arch/sparc/vdso/vma.c +++ b/arch/sparc/vdso/vma.c @@ -449,9 +449,8 @@ static __init int vdso_setup(char *s) unsigned long val; err = kstrtoul(s, 10, &val); - if (err) - return err; - vdso_enabled = val; - return 0; + if (!err) + vdso_enabled = val; + return 1; } __setup("vdso=", vdso_setup); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 07e045399348e2ad2621a09babff77e14b80506e..87aee638e1a5d894a02ae02b14a01d99f280ba38 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -629,7 +629,7 @@ void nmi_backtrace_stall_check(const struct cpumask *btp) msgp = nmi_check_stall_msg[idx]; if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1)) modp = ", but OK because ignore_nmis was set"; - if (nmi_seq & ~0x1) + if (nmi_seq & 0x1) 
msghp = " (CPU currently in NMI handler function)"; else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq) msghp = " (CPU exited one NMI handler function)"; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 773132c3bf5af760827f8dd0ab9601de702744a5..d65f91d0a8bca3bd156da45029a6975fea487c59 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -677,6 +677,11 @@ void kvm_set_cpu_caps(void) F(AMX_COMPLEX) ); + kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX, + F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) | + F(BHI_CTRL) | F(MCDT_NO) + ); + kvm_cpu_cap_mask(CPUID_D_1_EAX, F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd ); @@ -956,13 +961,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) break; /* function 7 has additional index. */ case 7: - entry->eax = min(entry->eax, 1u); + max_idx = entry->eax = min(entry->eax, 2u); cpuid_entry_override(entry, CPUID_7_0_EBX); cpuid_entry_override(entry, CPUID_7_ECX); cpuid_entry_override(entry, CPUID_7_EDX); - /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */ - if (entry->eax == 1) { + /* KVM only supports up to 0x7.2, capped above via min(). */ + if (max_idx >= 1) { entry = do_host_cpuid(array, function, 1); if (!entry) goto out; @@ -972,6 +977,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->ebx = 0; entry->ecx = 0; } + if (max_idx >= 2) { + entry = do_host_cpuid(array, function, 2); + if (!entry) + goto out; + + cpuid_entry_override(entry, CPUID_7_2_EDX); + entry->ecx = 0; + entry->ebx = 0; + entry->eax = 0; + } break; case 0xa: { /* Architectural Performance Monitoring */ union cpuid10_eax eax; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 245b20973caee481055da37cd1ae01a78bbb335b..23fab75993a51386f8b4156ef0ca72a5ce90303b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -41,6 +41,7 @@ #include "ioapic.h" #include "trace.h" #include "x86.h" +#include "xen.h" #include "cpuid.h" #include "hyperv.h" #include "smm.h" @@ -499,8 +500,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) } /* Check if there are APF page ready requests pending */ - if (enabled) + if (enabled) { kvm_make_request(KVM_REQ_APF_READY, apic->vcpu); + kvm_xen_sw_enable_lapic(apic->vcpu); + } } static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index b816506783755a1ce87d511014c9903fe3e25798..aadefcaa9561d0a31e589784da7e871e4a0de2e0 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs { CPUID_7_1_EDX, CPUID_8000_0007_EDX, CPUID_8000_0022_EAX, + CPUID_7_2_EDX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, @@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs { #define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8) #define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14) +/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */ +#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0) +#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1) +#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2) +#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3) +#define X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4) +#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5) + /* CPUID level 0x80000007 (EDX). 
*/ #define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8) @@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX}, [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, + [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, }; /* @@ -106,18 +116,19 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) */ static __always_inline u32 __feature_translate(int x86_feature) { - if (x86_feature == X86_FEATURE_SGX1) - return KVM_X86_FEATURE_SGX1; - else if (x86_feature == X86_FEATURE_SGX2) - return KVM_X86_FEATURE_SGX2; - else if (x86_feature == X86_FEATURE_SGX_EDECCSSA) - return KVM_X86_FEATURE_SGX_EDECCSSA; - else if (x86_feature == X86_FEATURE_CONSTANT_TSC) - return KVM_X86_FEATURE_CONSTANT_TSC; - else if (x86_feature == X86_FEATURE_PERFMON_V2) - return KVM_X86_FEATURE_PERFMON_V2; - - return x86_feature; +#define KVM_X86_TRANSLATE_FEATURE(f) \ + case X86_FEATURE_##f: return KVM_X86_FEATURE_##f + + switch (x86_feature) { + KVM_X86_TRANSLATE_FEATURE(SGX1); + KVM_X86_TRANSLATE_FEATURE(SGX2); + KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA); + KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC); + KVM_X86_TRANSLATE_FEATURE(PERFMON_V2); + KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); + default: + return x86_feature; + } } static __always_inline u32 __feature_leaf(int x86_feature) diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 40edf4d1974c530336e9f9044fd3b18b18ea8de3..0ea6016ad132a227c70a1a3e0c03b84ebc604ab4 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -471,7 +471,7 @@ void kvm_xen_update_runstate(struct kvm_vcpu *v, int state) kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable); } -static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) +void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) { struct kvm_lapic_irq irq = { }; int r; diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index f8f1fe22d090696cb32b44f719d75e124fb4c8bf..f5841d9000aebd5b9584db188d89135fb6f3e11e 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -18,6 +18,7 @@ extern struct static_key_false_deferred kvm_xen_enabled; int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu); +void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu); int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); @@ -36,6 +37,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm, const struct kvm_irq_routing_entry *ue); void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu); +static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) +{ + /* + * The local APIC is being enabled. If the per-vCPU upcall vector is + * set and the vCPU's evtchn_upcall_pending flag is set, inject the + * interrupt. 
+ */ + if (static_branch_unlikely(&kvm_xen_enabled.key) && + vcpu->arch.xen.vcpu_info_cache.active && + vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu)) + kvm_xen_inject_vcpu_vector(vcpu); +} + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) { return static_branch_unlikely(&kvm_xen_enabled.key) && @@ -101,6 +115,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) { } +static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) +{ +} + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) { return false; diff --git a/block/bio.c b/block/bio.c index 270f6b99926eaa5b4f8866a95a360e135df58e51..62419aa09d731922e5d62cbc2649b1882da8bfaf 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1149,7 +1149,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) bio_for_each_folio_all(fi, bio) { struct page *page; - size_t done = 0; + size_t nr_pages; if (mark_dirty) { folio_lock(fi.folio); @@ -1157,10 +1157,11 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) folio_unlock(fi.folio); } page = folio_page(fi.folio, fi.offset / PAGE_SIZE); + nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - + fi.offset / PAGE_SIZE + 1; do { bio_release_page(bio, page++); - done += PAGE_SIZE; - } while (done < fi.length); + } while (--nr_pages != 0); } } EXPORT_SYMBOL_GPL(__bio_release_pages); diff --git a/block/blk-settings.c b/block/blk-settings.c index 0046b447268f912a1b587c6367adb88548571feb..7019b8e204d965161eff7e232d11abc56e8dab54 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -686,6 +686,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->zone_write_granularity = max(t->zone_write_granularity, b->zone_write_granularity); t->zoned = max(t->zoned, b->zoned); + if (!t->zoned) { + t->zone_write_granularity = 0; + t->max_zone_append_sectors = 0; + } return ret; } EXPORT_SYMBOL(blk_stack_limits); diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c index eea2a2fa4f0159af7ece0add14d5b23100f6d49f..45f90610313382cac117da4cbe93d1d782a96054 100644 --- a/drivers/accessibility/speakup/synth.c +++ b/drivers/accessibility/speakup/synth.c @@ -208,8 +208,10 @@ void spk_do_flush(void) wake_up_process(speakup_task); } -void synth_write(const char *buf, size_t count) +void synth_write(const char *_buf, size_t count) { + const unsigned char *buf = (const unsigned char *) _buf; + while (count--) synth_buffer_add(*buf++); synth_start(); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 42171f766dcba6126efebd3a3b8139f22ef1051b..5a5a9e978e85f3fc9d89cb7d43527dc1dd42a9b1 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev) return; if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && - wirq->status & WAKE_IRQ_DEDICATED_REVERSE) + wirq->status & WAKE_IRQ_DEDICATED_REVERSE) { enable_irq(wirq->irq); + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; + } } /** diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 951fe3014a3f3feef41e75b39930ca176e5bb856..abccd571cf3ee8709e1a7eb411f9106e4c10b9b5 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1234,6 +1234,9 @@ static int btnxpuart_close(struct hci_dev *hdev) ps_wakeup(nxpdev); serdev_device_close(nxpdev->serdev); + skb_queue_purge(&nxpdev->txq); + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); return 0; } diff --git 
a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 1b350412d8a6be95abc2fb993ee40da0606f93b9..64c875657687d20f7da1894f76a6c4146f86ef15 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -919,8 +919,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask, int rc; u32 int_status; - INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); - rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL, tis_int_handler, IRQF_ONESHOT | flags, dev_name(&chip->dev), chip); @@ -1132,6 +1130,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->phy_ops = phy_ops; priv->locality_count = 0; mutex_init(&priv->locality_count_mutex); + INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func); dev_set_drvdata(&chip->dev, priv); diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c index cc2331d729fb6c6586d93deb97f67d4d99849273..3136ba1c2a59cc2c9726912df64a8cb9357fcb1e 100644 --- a/drivers/clk/qcom/gcc-ipq5018.c +++ b/drivers/clk/qcom/gcc-ipq5018.c @@ -856,6 +856,7 @@ static struct clk_rcg2 lpass_sway_clk_src = { static const struct freq_tbl ftbl_pcie0_aux_clk_src[] = { F(2000000, P_XO, 12, 0, 0), + { } }; static struct clk_rcg2 pcie0_aux_clk_src = { @@ -1098,6 +1099,7 @@ static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = { F(100000000, P_GPLL0, 8, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), F(320000000, P_GPLL0, 2.5, 0, 0), + { } }; static struct clk_rcg2 qpic_io_macro_clk_src = { @@ -1193,6 +1195,7 @@ static struct clk_rcg2 ubi0_axi_clk_src = { static const struct freq_tbl ftbl_ubi0_core_clk_src[] = { F(850000000, P_UBI32_PLL, 1, 0, 0), F(1000000000, P_UBI32_PLL, 1, 0, 0), + { } }; static struct clk_rcg2 ubi0_core_clk_src = { diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c index f9494fa1b871690ffba5d8674026065cde58cdae..2e4189e770d3ff051c7f93273a125b18d4027f35 100644 --- a/drivers/clk/qcom/gcc-ipq6018.c +++ b/drivers/clk/qcom/gcc-ipq6018.c @@ -1554,6 +1554,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = { static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { F(24000000, P_XO, 1, 0, 0), + { } }; static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = { @@ -1734,6 +1735,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { F(160000000, P_GPLL0, 5, 0, 0), F(216000000, P_GPLL6, 5, 0, 0), F(308570000, P_GPLL6, 3.5, 0, 0), + { } }; static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = { diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index b7faf12a511a16f786d81643a1f90296212886f2..7bc679871f324f5354fca91ac11657da447a8cff 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -644,6 +644,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { static const struct freq_tbl ftbl_pcie_aux_clk_src[] = { F(19200000, P_XO, 1, 0, 0), + { } }; static const struct clk_parent_data gcc_xo_gpll0_sleep_clk[] = { @@ -795,6 +796,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(160000000, P_GPLL0, 5, 0, 0), F(308570000, P_GPLL6, 3.5, 0, 0), + { } }; static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = { diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c index e8190108e1aef394eb546981b1cea8a6f6f0e2ba..0a3f846695b803fe4b93d394f96de393b1a55f1d 100644 --- a/drivers/clk/qcom/gcc-ipq9574.c +++ b/drivers/clk/qcom/gcc-ipq9574.c @@ -2082,6 +2082,7 @@ static struct clk_branch 
gcc_sdcc1_apps_clk = { static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = { F(150000000, P_GPLL4, 8, 0, 0), F(300000000, P_GPLL4, 4, 0, 0), + { } }; static struct clk_rcg2 sdcc1_ice_core_clk_src = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 725cd52d2398ede3b4bcf42a7c1690dc86663cfc..ea4c3bf4fb9bf7f35d73db4a690099b4df3f7286 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -4037,3 +4037,4 @@ module_exit(gcc_sdm845_exit); MODULE_DESCRIPTION("QTI GCC SDM845 Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:gcc-sdm845"); +MODULE_SOFTDEP("pre: rpmhpd"); diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index 02fc21208dd14b9bcfd7ae641e34f6411d0f34e2..c89700ab93f9c6468c0b509305059a852228bbe0 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -348,6 +348,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { F(333430000, P_MMPLL1, 3.5, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), F(466800000, P_MMPLL1, 2.5, 0, 0), + { } }; static struct clk_rcg2 mmss_axi_clk_src = { @@ -372,6 +373,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(228570000, P_MMPLL0, 3.5, 0, 0), F(320000000, P_MMPLL0, 2.5, 0, 0), + { } }; static struct clk_rcg2 ocmemnoc_clk_src = { diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index 1f3bd302fe6ed4b7540a380723a39d56f2636570..6df22a67f02d3e8c2f9035a15dcfb366274304b9 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -290,6 +290,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = { F(291750000, P_MMPLL1, 4, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), F(466800000, P_MMPLL1, 2.5, 0, 0), + { } }; static struct clk_rcg2 mmss_axi_clk_src = { @@ -314,6 +315,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = { F(150000000, P_GPLL0, 4, 0, 0), F(291750000, P_MMPLL1, 4, 0, 0), F(400000000, P_MMPLL0, 2, 0, 0), + { } }; static struct clk_rcg2 ocmemnoc_clk_src = { diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 1791d37fbc53c57e0f13469934eee357c0de87cc..07f34199543966e8ae2d8f0cfb1ce72442e64a09 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -570,7 +570,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu, if (target_perf < capacity) des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity); - min_perf = READ_ONCE(cpudata->highest_perf); + min_perf = READ_ONCE(cpudata->lowest_perf); if (_min_perf < capacity) min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity); diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index a39e70bd4b21bbc4ecd9180e194f8cf335ab167b..621d14ea3b81afa75800bc55faeabc24d541f304 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -92,7 +92,8 @@ static void adf_device_reset_worker(struct work_struct *work) if (adf_dev_restart(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - if (reset_data->mode == ADF_DEV_RESET_ASYNC) + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) kfree(reset_data); WARN(1, "QAT: device restart failed. 
Device is unusable\n"); return; @@ -100,11 +101,19 @@ static void adf_device_reset_worker(struct work_struct *work) adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); - /* The dev is back alive. Notify the caller if in sync mode */ - if (reset_data->mode == ADF_DEV_RESET_SYNC) - complete(&reset_data->compl); - else + /* + * The dev is back alive. Notify the caller if in sync mode + * + * If device restart will take a more time than expected, + * the schedule_reset() function can timeout and exit. This can be + * detected by calling the completion_done() function. In this case + * the reset_data structure needs to be freed here. + */ + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) kfree(reset_data); + else + complete(&reset_data->compl); } static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, @@ -137,8 +146,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); ret = -EFAULT; + } else { + kfree(reset_data); } - kfree(reset_data); return ret; } return 0; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index a8d3fa81e4ec5d40fabe0f63018074372489b8d3..f9bc837e22bddc21e54663725ff07c3c8c790d36 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -494,7 +494,7 @@ static const struct drm_driver etnaviv_drm_driver = { .desc = "etnaviv DRM", .date = "20151214", .major = 1, - .minor = 3, + .minor = 4, }; /* diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c index 67201242438bed9225d5abfb76eb3bac07d56edc..8665f2658d51b302f7e2e8ad9f52a438ccbc5d6f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c @@ -265,6 +265,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) { struct etnaviv_chip_identity *ident = &gpu->identity; + const u32 product_id = ident->product_id; + const u32 customer_id = ident->customer_id; + const u32 eco_id = ident->eco_id; int i; for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) { @@ -278,6 +281,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu) etnaviv_chip_identities[i].eco_id == ~0U)) { memcpy(ident, &etnaviv_chip_identities[i], sizeof(*ident)); + + /* Restore some id values as ~0U aka 'don't care' might been used. */ + ident->product_id = product_id; + ident->customer_id = customer_id; + ident->eco_id = eco_id; + return true; } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 36987ef3fc300623eae6caff131f4d89ac4d1450..5fef0b31c11798e06696420fada0c799c166a0bd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, vmw_res_type(ctx) == vmw_res_dx_context) { for (i = 0; i < cotable_max; ++i) { res = vmw_context_cotable(ctx, i); - if (IS_ERR(res)) + if (IS_ERR_OR_NULL(res)) continue; ret = vmw_execbuf_res_val_add(sw_context, res, @@ -1266,6 +1266,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, return -EINVAL; cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY); + if (IS_ERR_OR_NULL(cotable_res)) + return cotable_res ? 
PTR_ERR(cotable_res) : -EINVAL; ret = vmw_cotable_notify(cotable_res, cmd->body.queryId); return ret; @@ -2484,6 +2486,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, return ret; res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; ret = vmw_cotable_notify(res, cmd->defined_id); if (unlikely(ret != 0)) return ret; @@ -2569,8 +2573,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, so_type = vmw_so_cmd_to_type(header->id); res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]); - if (IS_ERR(res)) - return PTR_ERR(res); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; cmd = container_of(header, typeof(*cmd), header); ret = vmw_cotable_notify(res, cmd->defined_id); @@ -2689,6 +2693,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, return -EINVAL; res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; ret = vmw_cotable_notify(res, cmd->body.shaderId); if (ret) return ret; @@ -3010,6 +3016,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv, } res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT); + if (IS_ERR_OR_NULL(res)) + return res ? PTR_ERR(res) : -EINVAL; ret = vmw_cotable_notify(res, cmd->body.soid); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b51578918cf8d5eed4ba3be4354aacdf0099605f..5681a1b42aa249069ff21ece63fda17e5ed74b5b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -184,13 +184,12 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h) */ static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) { - bool is_iomem; if (vps->surf) { if (vps->surf_mapped) return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo); return vps->surf->snooper.image; } else if (vps->bo) - return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem); + return vmw_bo_map_and_cache(vps->bo); return NULL; } @@ -652,22 +651,12 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, { struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - bool is_iomem; if (vps->surf_mapped) { vmw_bo_unmap(vps->surf->res.guest_memory_bo); vps->surf_mapped = false; } - if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) { - const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL); - - if (likely(ret == 0)) { - ttm_bo_kunmap(&vps->bo->map); - ttm_bo_unreserve(&vps->bo->tbo); - } - } - vmw_du_cursor_plane_unmap_cm(vps); vmw_du_put_cursor_mob(vcp, vps); @@ -703,6 +692,10 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, int ret = 0; if (vps->surf) { + if (vps->surf_mapped) { + vmw_bo_unmap(vps->surf->res.guest_memory_bo); + vps->surf_mapped = false; + } vmw_surface_unreference(&vps->surf); vps->surf = NULL; } diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 2a7a4b6b00942b34ee411ef1d2276176f4a132f7..9b02b304c2f5d4ca3c4c164cc0fe1fa7dc9bfd3a 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = { MODULE_DEVICE_TABLE(i2c, amc6821_id); +static const struct of_device_id __maybe_unused amc6821_of_match[] = { + { + .compatible = "ti,amc6821", + .data = (void *)amc6821, + }, + { } +}; + +MODULE_DEVICE_TABLE(of, amc6821_of_match); + static 
struct i2c_driver amc6821_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "amc6821", + .of_match_table = of_match_ptr(amc6821_of_match), }, .probe = amc6821_probe, .id_table = amc6821_id, diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index dd94667a623bd99833792dffacec3fcf4725f53e..1c0042fbbb5481aa651dd6bb1a4b822ed7949ec8 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -52,7 +52,7 @@ #define SARADC2_START BIT(4) #define SARADC2_SINGLE_MODE BIT(5) -#define SARADC2_CONV_CHANNELS GENMASK(15, 0) +#define SARADC2_CONV_CHANNELS GENMASK(3, 0) struct rockchip_saradc; @@ -102,12 +102,12 @@ static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn) writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC); writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC); val = FIELD_PREP(SARADC2_EN_END_INT, 1); - val |= val << 16; + val |= SARADC2_EN_END_INT << 16; writel_relaxed(val, info->regs + SARADC2_END_INT_EN); val = FIELD_PREP(SARADC2_START, 1) | FIELD_PREP(SARADC2_SINGLE_MODE, 1) | FIELD_PREP(SARADC2_CONV_CHANNELS, chn); - val |= val << 16; + val |= (SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16; writel(val, info->regs + SARADC2_CONV_CON); } diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c index d76214fa9ad8645441e9c53814719ba0109f30ac..79719fc8a08fb495ab1ca50d7a401011d2f062d1 100644 --- a/drivers/leds/trigger/ledtrig-netdev.c +++ b/drivers/leds/trigger/ledtrig-netdev.c @@ -462,12 +462,12 @@ static int netdev_trig_notify(struct notifier_block *nb, trigger_data->duplex = DUPLEX_UNKNOWN; switch (evt) { case NETDEV_CHANGENAME: - get_device_state(trigger_data); - fallthrough; case NETDEV_REGISTER: dev_put(trigger_data->net_dev); dev_hold(dev); trigger_data->net_dev = dev; + if (evt == NETDEV_CHANGENAME) + get_device_state(trigger_data); break; case NETDEV_UNREGISTER: dev_put(trigger_data->net_dev); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 3d8ee9eb59ea6c604340ff00571705574342ace3..385e24f55ec00208b1b7cea0d135d87db6c58a3b 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -4042,7 +4042,9 @@ static void raid_resume(struct dm_target *ti) * Take this opportunity to check whether any failed * devices are reachable again. */ + mddev_lock_nointr(mddev); attempt_restore_of_faulty_devices(rs); + mddev_unlock(mddev); } if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) { diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 6f9ff14971f982987af990af9574d247f3555f0a..42d4c38ba54d5fe4f0055a6e3eda01178a0bbab5 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -234,7 +234,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, sector_t doff; bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; - if (pg_index == store->file_pages - 1) { + /* we compare length (page numbers), not page offset. 
*/ + if ((pg_index - store->sb_index) == store->file_pages - 1) { unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1); if (last_page_size == 0) @@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index, struct page *page = store->filemap[pg_index]; if (mddev_is_clustered(bitmap->mddev)) { - pg_index += bitmap->cluster_slot * - DIV_ROUND_UP(store->bytes, PAGE_SIZE); + /* go to node bitmap area starting point */ + pg_index += store->sb_index; } if (store->file) @@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) unsigned long index = file_page_index(store, chunk); unsigned long node_offset = 0; + index += store->sb_index; if (mddev_is_clustered(bitmap->mddev)) node_offset = bitmap->cluster_slot * store->file_pages; @@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) unsigned long index = file_page_index(store, chunk); unsigned long node_offset = 0; + index += store->sb_index; if (mddev_is_clustered(bitmap->mddev)) node_offset = bitmap->cluster_slot * store->file_pages; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 68d86dbecb4ac8d6a837d7e72a99c7c6165c8e6a..212bf85edad03874dd48fcfc4f77bf9d8d2e55bb 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2420,7 +2420,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) atomic_inc(&conf->active_stripes); raid5_release_stripe(sh); - conf->max_nr_stripes++; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1); return 1; } @@ -2717,7 +2717,7 @@ static int drop_one_stripe(struct r5conf *conf) shrink_buffers(sh); free_stripe(conf->slab_cache, sh); atomic_dec(&conf->active_stripes); - conf->max_nr_stripes--; + WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1); return 1; } @@ -6901,7 +6901,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) if (size <= 16 || size > 32768) return -EINVAL; - conf->min_nr_stripes = size; + WRITE_ONCE(conf->min_nr_stripes, size); mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) @@ -6913,7 +6913,7 @@ raid5_set_cache_size(struct mddev *mddev, int size) mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) { - conf->min_nr_stripes = conf->max_nr_stripes; + WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes); result = -ENOMEM; break; } @@ -7478,11 +7478,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); + int max_stripes = READ_ONCE(conf->max_nr_stripes); + int min_stripes = READ_ONCE(conf->min_nr_stripes); - if (conf->max_nr_stripes < conf->min_nr_stripes) + if (max_stripes < min_stripes) /* unlikely, but not impossible */ return 0; - return conf->max_nr_stripes - conf->min_nr_stripes; + return max_stripes - min_stripes; } static struct r5conf *setup_conf(struct mddev *mddev) diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index 83468d4a440b3546c53ce4da2b3337018af6f7cf..21c354067f44a15bacc2a7c02cbaa1d4657af501 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -522,14 +522,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk, /* * Move the top entry link cursor to the next link. If all links of the entry - * have been visited, pop the entry itself. + * have been visited, pop the entry itself. Return true if the entry has been + * popped. 
*/ -static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) +static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entry; if (WARN_ON(walk->stack.top < 0)) - return; + return false; entry = media_pipeline_walk_top(walk); @@ -539,7 +540,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) walk->stack.top); walk->stack.top--; - return; + return true; } entry->links = entry->links->next; @@ -547,6 +548,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) dev_dbg(walk->mdev->dev, "media pipeline: moved entry %u to next link\n", walk->stack.top); + + return false; } /* Free all memory allocated while walking the pipeline. */ @@ -592,30 +595,24 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, struct media_pipeline_walk *walk) { struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk); - struct media_pad *pad; + struct media_pad *origin; struct media_link *link; struct media_pad *local; struct media_pad *remote; + bool last_link; int ret; - pad = entry->pad; + origin = entry->pad; link = list_entry(entry->links, typeof(*link), list); - media_pipeline_walk_pop(walk); + last_link = media_pipeline_walk_pop(walk); dev_dbg(walk->mdev->dev, "media pipeline: exploring link '%s':%u -> '%s':%u\n", link->source->entity->name, link->source->index, link->sink->entity->name, link->sink->index); - /* Skip links that are not enabled. */ - if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { - dev_dbg(walk->mdev->dev, - "media pipeline: skipping link (disabled)\n"); - return 0; - } - /* Get the local pad and remote pad. */ - if (link->source->entity == pad->entity) { + if (link->source->entity == origin->entity) { local = link->source; remote = link->sink; } else { @@ -627,25 +624,64 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe, * Skip links that originate from a different pad than the incoming pad * that is not connected internally in the entity to the incoming pad. */ - if (pad != local && - !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) { + if (origin != local && + !media_entity_has_pad_interdep(origin->entity, origin->index, + local->index)) { dev_dbg(walk->mdev->dev, "media pipeline: skipping link (no route)\n"); - return 0; + goto done; } /* - * Add the local and remote pads of the link to the pipeline and push - * them to the stack, if they're not already present. + * Add the local pad of the link to the pipeline and push it to the + * stack, if not already present. */ ret = media_pipeline_add_pad(pipe, walk, local); if (ret) return ret; + /* Similarly, add the remote pad, but only if the link is enabled. */ + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (disabled)\n"); + goto done; + } + ret = media_pipeline_add_pad(pipe, walk, remote); if (ret) return ret; +done: + /* + * If we're done iterating over links, iterate over pads of the entity. + * This is necessary to discover pads that are not connected with any + * link. Those are dead ends from a pipeline exploration point of view, + * but are still part of the pipeline and need to be added to enable + * proper validation. 
+ */ + if (!last_link) + return 0; + + dev_dbg(walk->mdev->dev, + "media pipeline: adding unconnected pads of '%s'\n", + local->entity->name); + + media_entity_for_each_pad(origin->entity, local) { + /* + * Skip the origin pad (already handled), pad that have links + * (already discovered through iterating over links) and pads + * not internally connected. + */ + if (origin == local || !local->num_links || + !media_entity_has_pad_interdep(origin->entity, origin->index, + local->index)) + continue; + + ret = media_pipeline_add_pad(pipe, walk, local); + if (ret) + return ret; + } + return 0; } @@ -757,7 +793,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad, struct media_pad *pad = ppad->pad; struct media_entity *entity = pad->entity; bool has_enabled_link = false; - bool has_link = false; struct media_link *link; dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name, @@ -787,7 +822,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad, /* Record if the pad has links and enabled links. */ if (link->flags & MEDIA_LNK_FL_ENABLED) has_enabled_link = true; - has_link = true; /* * Validate the link if it's enabled and has the @@ -825,7 +859,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad, * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set, * ensure that it has either no link or an enabled link. */ - if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link && + if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && !has_enabled_link) { dev_dbg(mdev->dev, "Pad '%s':%u must be connected by an enabled link\n", @@ -1025,6 +1059,9 @@ static void __media_entity_remove_link(struct media_entity *entity, /* Remove the reverse links for a data link. */ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) { + link->source->num_links--; + link->sink->num_links--; + if (link->source->entity == entity) remote = link->sink->entity; else @@ -1079,6 +1116,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, struct media_link *link; struct media_link *backlink; + if (flags & MEDIA_LNK_FL_LINK_TYPE) + return -EINVAL; + + flags |= MEDIA_LNK_FL_DATA_LINK; + if (WARN_ON(!source || !sink) || WARN_ON(source_pad >= source->num_pads) || WARN_ON(sink_pad >= sink->num_pads)) @@ -1094,7 +1136,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, link->source = &source->pads[source_pad]; link->sink = &sink->pads[sink_pad]; - link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK; + link->flags = flags; /* Initialize graph object embedded at the new link */ media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK, @@ -1125,6 +1167,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad, sink->num_links++; source->num_links++; + link->source->num_links++; + link->sink->num_links++; + return 0; } EXPORT_SYMBOL_GPL(media_create_pad_link); diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c index 792f031e032ae93544c6da9a671b164e271773c4..c9a4d091b5707402bacc5f53261e61434758f9f5 100644 --- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c +++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c @@ -161,7 +161,6 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar, pad = media_pad_remote_pad_first(&xbar->pads[sink_pad]); sd = media_entity_to_v4l2_subdev(pad->entity); - if (!sd) { dev_dbg(xbar->isi->dev, "no entity connected to crossbar input %u\n", @@ -465,7 +464,8 @@ int mxc_isi_crossbar_init(struct 
mxc_isi_dev *isi) } for (i = 0; i < xbar->num_sinks; ++i) - xbar->pads[i].flags = MEDIA_PAD_FL_SINK; + xbar->pads[i].flags = MEDIA_PAD_FL_SINK + | MEDIA_PAD_FL_MUST_CONNECT; for (i = 0; i < xbar->num_sources; ++i) xbar->pads[i + xbar->num_sinks].flags = MEDIA_PAD_FL_SOURCE; diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index 57ded9ff3f04350df4c7705487c0110d0437d195..29bc63021c5aae978b7937b31b9eb81fc03a4fde 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc4000_priv *priv = fe->tuner_priv; + mutex_lock(&priv->lock); *freq = priv->freq_hz + priv->freq_offset; if (debug) { - mutex_lock(&priv->lock); if ((priv->cur_fw.type & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) { u16 snr = 0; @@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) return 0; } } - mutex_unlock(&priv->lock); } + mutex_unlock(&priv->lock); dprintk(1, "%s()\n", __func__); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 32d49100dff5161407f8c811d6fd12657f25700a..86efa6084696ec498f14f0ab15928f6d22444204 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -874,10 +874,11 @@ static const struct block_device_operations mmc_bdops = { static int mmc_blk_part_switch_pre(struct mmc_card *card, unsigned int part_type) { - const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; + const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if ((part_type & mask) == mask) { + if ((part_type & mask) == rpmb) { if (card->ext_csd.cmdq_en) { ret = mmc_cmdq_disable(card); if (ret) @@ -892,10 +893,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card, static int mmc_blk_part_switch_post(struct mmc_card *card, unsigned int part_type) { - const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; + const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if ((part_type & mask) == mask) { + if ((part_type & mask) == rpmb) { mmc_retune_unpause(card->host); if (card->reenable_cmdq && !card->ext_csd.cmdq_en) ret = mmc_cmdq_enable(card); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index be7f18fd4836ab2943de311523d7c0c4493eac75..c253d176db69180b9da70036e3594ab587547d0c 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work) else mrq->cmd->error = -ETIMEDOUT; + /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */ + host->mrq = ERR_PTR(-EBUSY); host->cmd = NULL; host->data = NULL; diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index a506e658d4624a6d40d49516a41b17ef4303f2be..439e9593c8ed175cddf1f5cd4663eb496b333b0e 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -63,7 +63,7 @@ #define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \ ( \ (cmd_dir) | \ - ((ran) << 19) | \ + (ran) | \ ((bch) << 14) | \ ((short_mode) << 13) | \ (((page_size) & 0x7f) << 6) | \ diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 28c8151a0725d5b5c9abbc31bdab3e48fad3253b..2cdc29483aee0d3f77361247feefd33f0f96a6dd 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -85,9 +85,10 @@ size_t 
ubi_calc_fm_size(struct ubi_device *ubi) sizeof(struct ubi_fm_scan_pool) + sizeof(struct ubi_fm_scan_pool) + (ubi->peb_count * sizeof(struct ubi_fm_ec)) + - (sizeof(struct ubi_fm_eba) + - (ubi->peb_count * sizeof(__be32))) + - sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES; + ((sizeof(struct ubi_fm_eba) + + sizeof(struct ubi_fm_volhdr)) * + (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) + + (ubi->peb_count * sizeof(__be32)); return roundup(size, ubi->leb_size); } diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index f700f0e4f2ec4d7c700a2428fd83e54129992c24..6e5489e233dd2afadc99e150ac9476ec622c38d3 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) * The number of supported volumes is limited by the eraseblock size * and by the UBI_MAX_VOLUMES constant. */ + + if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) { + ubi_err(ubi, "LEB size too small for a volume record"); + return -EINVAL; + } + ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; if (ubi->vtbl_slots > UBI_MAX_VOLUMES) ubi->vtbl_slots = UBI_MAX_VOLUMES; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index df9c26593dbe15ae404e2dd26cf81a3084830449..567e3a0675d88ca7a180124b8e778b69ae89e92f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1179,8 +1179,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, scan_request = cfg->scan_request; cfg->scan_request = NULL; - if (timer_pending(&cfg->escan_timeout)) - del_timer_sync(&cfg->escan_timeout); + timer_delete_sync(&cfg->escan_timeout); if (fw_abort) { /* Do a scan abort to stop the driver's scan engine */ @@ -8441,6 +8440,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) brcmf_btcoex_detach(cfg); wiphy_unregister(cfg->wiphy); wl_deinit_priv(cfg); + cancel_work_sync(&cfg->escan_timeout_work); brcmf_free_wiphy(cfg->wiphy); kfree(cfg); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c index 86eafdb405419873a1cbb385cb1c4f292408001e..f610818c2b059dd8ab9c53710a0a3dc9e50e18de 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c @@ -187,9 +187,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr) mutex_lock(&fwvid_list_lock); - drvr->vops = NULL; - list_del(&drvr->bus_if->list); - + if (drvr->vops) { + drvr->vops = NULL; + list_del(&drvr->bus_if->list); + } mutex_unlock(&fwvid_list_lock); } diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c index 7a5cbdc31ef7937f0204f7bf081b6e87577b90de..e2c7d9f876836a32d335d751cb3303ffd5fa204f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c @@ -9,24 +9,36 @@ #include "usb.h" static const struct usb_device_id rtw_8821cu_id_table[] = { - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff), + .driver_info = 
(kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */ - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */ - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */ - { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff), - .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */ + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */ + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */ + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */ {}, }; MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table); diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c index d6b533497ce1a5334acd6c647e1d87c15cc579af..ba2714bef8d0e32826b07e4b17599709267ffaf5 100644 --- a/drivers/nvmem/meson-efuse.c +++ b/drivers/nvmem/meson-efuse.c @@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev) struct nvmem_config *econfig; struct clk *clk; unsigned int size; - int ret; sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0); if (!sm_np) { @@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev) if (!fw) return -EPROBE_DEFER; - clk = devm_clk_get(dev, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - if (ret != -EPROBE_DEFER) - dev_err(dev, 
"failed to get efuse gate"); - return ret; - } - - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "failed to enable gate"); - return ret; - } - - ret = devm_add_action_or_reset(dev, - (void(*)(void *))clk_disable_unprepare, - clk); - if (ret) { - dev_err(dev, "failed to add disable callback"); - return ret; - } + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate"); if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) { dev_err(dev, "failed to get max user"); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 9d1f259fe35732dc1ebf921871361445d75a967e..ad6516a3ae6eaafcfdcf96f7772577e012b58f08 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -671,8 +671,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep) nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + /* + * PCIe r6.0, sec 7.8.6.2 require us to support at least one + * size in the range from 1 MB to 512 GB. Advertise support + * for 1 MB BAR size only. + */ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) - dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); + dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4)); } /* diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 64420ecc24d1c38c45fc838e5b635495564172d3..d3ca6d3493130b3875030907199c9bc94d2a172a 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -53,6 +53,7 @@ #define PARF_SLV_ADDR_SPACE_SIZE 0x358 #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_TABLE_N 0x2000 +#define PARF_BDF_TO_SID_CFG 0x2c00 /* ELBI registers */ #define ELBI_SYS_CTRL 0x04 @@ -120,6 +121,9 @@ /* PARF_DEVICE_TYPE register fields */ #define DEVICE_TYPE_RC 0x4 +/* PARF_BDF_TO_SID_CFG fields */ +#define BDF_TO_SID_BYPASS BIT(0) + /* ELBI_SYS_CTRL register fields */ #define ELBI_SYS_CTRL_LT_ENABLE BIT(0) @@ -985,11 +989,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; int i, nr_map, size = 0; u32 smmu_sid_base; + u32 val; of_get_property(dev->of_node, "iommu-map", &size); if (!size) return 0; + /* Enable BDF to SID translation by disabling bypass mode (default) */ + val = readl(pcie->parf + PARF_BDF_TO_SID_CFG); + val &= ~BDF_TO_SID_BYPASS; + writel(val, pcie->parf + PARF_BDF_TO_SID_CFG); + map = kzalloc(size, GFP_KERNEL); if (!map) return -ENOMEM; diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index bed3cefdaf198f86e37ecae1cce391e196d87ede..5ab1a035c49691d6e5baed8f66da231eb25066a2 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -49,6 +49,7 @@ #include #include #include +#include #include /* @@ -465,7 +466,7 @@ struct pci_eject_response { u32 status; } __packed; -static int pci_ring_size = (4 * PAGE_SIZE); +static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K); /* * Driver specific state. 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 51ec9e7e784f0e3a82134a05b114abe5be90d07c..9c59bf03d6579f5527ae7da35030cc88962c7cf1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev) if (drv->remove) { pm_runtime_get_sync(dev); + /* + * If the driver provides a .runtime_idle() callback and it has + * started to run already, it may continue to run in parallel + * with the code below, so wait until all of the runtime PM + * activity has completed. + */ + pm_runtime_barrier(dev); drv->remove(pci_dev); pm_runtime_put_noidle(dev); } diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 59c90d04a609afbc6f2c485604e8038fbaecddc2..705893b5f7b09babb7ff2b003dd49d74f408f0fe 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -13,6 +13,7 @@ #define dev_fmt(fmt) "AER: " fmt #include +#include #include #include #include @@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev, return 0; } +static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data) +{ + pm_runtime_get_sync(&pdev->dev); + return 0; +} + +static int pci_pm_runtime_put(struct pci_dev *pdev, void *data) +{ + pm_runtime_put(&pdev->dev); + return 0; +} + static int report_frozen_detected(struct pci_dev *dev, void *data) { return report_error_detected(dev, pci_channel_io_frozen, data); @@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, else bridge = pci_upstream_bridge(dev); + pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL); + pci_dbg(bridge, "broadcast error_detected message\n"); if (state == pci_channel_io_frozen) { pci_walk_bridge(bridge, report_frozen_detected, &status); @@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pcie_clear_device_status(dev); pci_aer_clear_nonfatal_status(dev); } + + pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); + pci_info(bridge, "device recovery successful\n"); return status; failed: + pci_walk_bridge(bridge, pci_pm_runtime_put, NULL); + pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); /* TODO: Should kernel panic here? 
*/ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1739fd54a3106c27284360c64a23820290cf2108..b3976dcb71f10168cf2bae81bba9bbce5cda424f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -6219,6 +6219,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size); #endif /* diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 142ebe0247cc00f82080303269835ad9565e584f..983a6e6173bd215c72a1960a2c523bcaf2d11833 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, } EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion); +int tegra_xusb_padctl_get_port_number(struct phy *phy) +{ + struct tegra_xusb_lane *lane; + + if (!phy) + return -ENODEV; + + lane = phy_get_drvdata(phy); + + return lane->index; +} +EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number); + MODULE_AUTHOR("Thierry Reding "); MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 2feed036c1cd415ef98a7ea3437ebd5bc7429b10..9d3e102f1a76b64bf8dab9cf45ea278c71307480 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -759,6 +760,11 @@ static int rapl_config(struct rapl_package *rp) default: return -EINVAL; } + + /* defaults_msr can be NULL on unsupported platforms */ + if (!rp->priv->defaults || !rp->priv->rpi) + return -ENODEV; + return 0; } @@ -1499,7 +1505,7 @@ static int rapl_detect_domains(struct rapl_package *rp) } /* called from CPU hotplug notifier, hotplug lock held */ -void rapl_remove_package(struct rapl_package *rp) +void rapl_remove_package_cpuslocked(struct rapl_package *rp) { struct rapl_domain *rd, *rd_package = NULL; @@ -1528,10 +1534,18 @@ void rapl_remove_package(struct rapl_package *rp) list_del(&rp->plist); kfree(rp); } +EXPORT_SYMBOL_GPL(rapl_remove_package_cpuslocked); + +void rapl_remove_package(struct rapl_package *rp) +{ + guard(cpus_read_lock)(); + rapl_remove_package_cpuslocked(rp); +} EXPORT_SYMBOL_GPL(rapl_remove_package); /* caller to ensure CPU hotplug lock is held */ -struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu) +struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, + bool id_is_cpu) { struct rapl_package *rp; int uid; @@ -1549,10 +1563,17 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, return NULL; } +EXPORT_SYMBOL_GPL(rapl_find_package_domain_cpuslocked); + +struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu) +{ + guard(cpus_read_lock)(); + return rapl_find_package_domain_cpuslocked(id, priv, id_is_cpu); +} EXPORT_SYMBOL_GPL(rapl_find_package_domain); /* called from CPU hotplug notifier, hotplug lock held */ -struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu) +struct rapl_package *rapl_add_package_cpuslocked(int id, struct 
rapl_if_priv *priv, bool id_is_cpu) { struct rapl_package *rp; int ret; @@ -1598,6 +1619,13 @@ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id kfree(rp); return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(rapl_add_package_cpuslocked); + +struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu) +{ + guard(cpus_read_lock)(); + return rapl_add_package_cpuslocked(id, priv, id_is_cpu); +} EXPORT_SYMBOL_GPL(rapl_add_package); static void power_limit_state_save(void) diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c index 250bd41a588c7d97f8cc3edbd85258f8dd244412..b4b6930cacb0b11b016c771b2301a4c8f416f707 100644 --- a/drivers/powercap/intel_rapl_msr.c +++ b/drivers/powercap/intel_rapl_msr.c @@ -73,9 +73,9 @@ static int rapl_cpu_online(unsigned int cpu) { struct rapl_package *rp; - rp = rapl_find_package_domain(cpu, rapl_msr_priv, true); + rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true); if (!rp) { - rp = rapl_add_package(cpu, rapl_msr_priv, true); + rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true); if (IS_ERR(rp)) return PTR_ERR(rp); } @@ -88,14 +88,14 @@ static int rapl_cpu_down_prep(unsigned int cpu) struct rapl_package *rp; int lead_cpu; - rp = rapl_find_package_domain(cpu, rapl_msr_priv, true); + rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true); if (!rp) return 0; cpumask_clear_cpu(cpu, &rp->cpumask); lead_cpu = cpumask_first(&rp->cpumask); if (lead_cpu >= nr_cpu_ids) - rapl_remove_package(rp); + rapl_remove_package_cpuslocked(rp); else if (rp->lead_cpu == cpu) rp->lead_cpu = lead_cpu; return 0; diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index 891c90fefd8b72157cb978b7ee36201d8c1e72a5..f6b7f085977ce1c80e2716fc43ba4152486ba195 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -40,6 +40,7 @@ enum tpmi_rapl_register { TPMI_RAPL_REG_ENERGY_STATUS, TPMI_RAPL_REG_PERF_STATUS, TPMI_RAPL_REG_POWER_INFO, + TPMI_RAPL_REG_DOMAIN_INFO, TPMI_RAPL_REG_INTERRUPT, TPMI_RAPL_REG_MAX = 15, }; @@ -130,6 +131,12 @@ static void trp_release(struct tpmi_rapl_package *trp) mutex_unlock(&tpmi_rapl_lock); } +/* + * Bit 0 of TPMI_RAPL_REG_DOMAIN_INFO indicates if the current package is a domain + * root or not. Only domain root packages can enumerate System (Psys) Domain. 
+ */ +#define TPMI_RAPL_DOMAIN_ROOT BIT(0) + static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) { u8 tpmi_domain_version; @@ -139,6 +146,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) enum rapl_domain_reg_id reg_id; int tpmi_domain_size, tpmi_domain_flags; u64 tpmi_domain_header = readq(trp->base + offset); + u64 tpmi_domain_info; /* Domain Parent bits are ignored for now */ tpmi_domain_version = tpmi_domain_header & 0xff; @@ -169,6 +177,13 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset) domain_type = RAPL_DOMAIN_PACKAGE; break; case TPMI_RAPL_DOMAIN_SYSTEM: + if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_DOMAIN_INFO))) { + pr_warn(FW_BUG "System domain must support Domain Info register\n"); + return -ENODEV; + } + tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO); + if (!(tpmi_domain_info & TPMI_RAPL_DOMAIN_ROOT)) + return 0; domain_type = RAPL_DOMAIN_PLATFORM; break; case TPMI_RAPL_DOMAIN_MEMORY: diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 83d76915a6ad6f28f930b705c274dc82897b36de..25b66b113b69594b7edf01caff62bda8435c9bbb 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev) kfree(vdev); + of_reserved_mem_device_release(&rvdev->pdev->dev); + dma_release_coherent_memory(&rvdev->pdev->dev); + put_device(&rvdev->pdev->dev); } @@ -584,9 +587,6 @@ static void rproc_virtio_remove(struct platform_device *pdev) rproc_remove_subdev(rproc, &rvdev->subdev); rproc_remove_rvdev(rvdev); - of_reserved_mem_device_release(&pdev->dev); - dma_release_coherent_memory(&pdev->dev); - put_device(&rproc->dev); } diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c index d43873bb5fe6df58d212122abf3fa1d92121914f..01cbd462198107b2eb1de205bb7aa1d933420c93 100644 --- a/drivers/slimbus/core.c +++ b/drivers/slimbus/core.c @@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev, if (ret < 0) goto err; } else if (report_present) { - ret = ida_simple_get(&ctrl->laddr_ida, - 0, SLIM_LA_MANAGER - 1, GFP_KERNEL); + ret = ida_alloc_max(&ctrl->laddr_ida, + SLIM_LA_MANAGER - 1, GFP_KERNEL); if (ret < 0) goto err; diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index e530767e80a5d1cfca3854038dc755f2d388c667..55cc44a401bc43b07812cacaae7d7a33f2b4fd8b 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu, struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe]; /* Initialize subdev media entity */ + imgu_sd->subdev.entity.ops = &imgu_media_ops; + for (i = 0; i < IMGU_NODE_NUM; i++) { + imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ? + MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; + } r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM, imgu_sd->subdev_pads); if (r) { @@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu, "failed initialize subdev media entity (%d)\n", r); return r; } - imgu_sd->subdev.entity.ops = &imgu_media_ops; - for (i = 0; i < IMGU_NODE_NUM; i++) { - imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ? 
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE; - } /* Initialize subdev */ v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops); @@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe, } /* Initialize media entities */ + node->vdev_pad.flags = node->output ? + MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; + vdev->entity.ops = NULL; r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad); if (r) { dev_err(dev, "failed initialize media entity (%d)\n", r); mutex_destroy(&node->lock); return r; } - node->vdev_pad.flags = node->output ? - MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; - vdev->entity.ops = NULL; /* Initialize vbq */ vbq->type = node->vdev_fmt.type; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 3ca0a2f5937f2624ff6e73ab0a006fb7a49361e4..cdf88cadfc4f19d62c8214510550684d64d4b6eb 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -113,14 +113,14 @@ static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone, int *temp) { int cpu; - int curr_temp; + int curr_temp, ret; *temp = 0; for_each_online_cpu(cpu) { - curr_temp = intel_tcc_get_temp(cpu, false); - if (curr_temp < 0) - return curr_temp; + ret = intel_tcc_get_temp(cpu, &curr_temp, false); + if (ret < 0) + return ret; if (!*temp || curr_temp > *temp) *temp = curr_temp; } diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c index 2f00fc3bf274a33255b1308de7772f1752c2c638..e964a9375722ad968f51c2c2d471863c35fc5de1 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c @@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu) if (topology_physical_package_id(cpu)) return 0; - rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); + rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true); if (!rp) { - rp = rapl_add_package(cpu, &rapl_mmio_priv, true); + rp = rapl_add_package_cpuslocked(cpu, &rapl_mmio_priv, true); if (IS_ERR(rp)) return PTR_ERR(rp); } @@ -42,14 +42,14 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu) struct rapl_package *rp; int lead_cpu; - rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true); + rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true); if (!rp) return 0; cpumask_clear_cpu(cpu, &rp->cpumask); lead_cpu = cpumask_first(&rp->cpumask); if (lead_cpu >= nr_cpu_ids) - rapl_remove_package(rp); + rapl_remove_package_cpuslocked(rp); else if (rp->lead_cpu == cpu) rp->lead_cpu = lead_cpu; return 0; diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c index 2e5c741c41ca03467074b615dee8dcc65c9b2deb..5e8b7f34b39510a28f2c253e585b0eec54397450 100644 --- a/drivers/thermal/intel/intel_tcc.c +++ b/drivers/thermal/intel/intel_tcc.c @@ -103,18 +103,19 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC); /** * intel_tcc_get_temp() - returns the current temperature * @cpu: cpu that the MSR should be run on, nagative value means any cpu. + * @temp: pointer to the memory for saving cpu temperature. * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor. * * Get the current temperature returned by the CPU core/package level * thermal sensor, in degrees C. 
* - * Return: Temperature in degrees C on success, negative error code otherwise. + * Return: 0 on success, negative error code otherwise. */ -int intel_tcc_get_temp(int cpu, bool pkg) +int intel_tcc_get_temp(int cpu, int *temp, bool pkg) { u32 low, high; u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS; - int tjmax, temp, err; + int tjmax, err; tjmax = intel_tcc_get_tjmax(cpu); if (tjmax < 0) @@ -131,9 +132,8 @@ int intel_tcc_get_temp(int cpu, bool pkg) if (!(low & BIT(31))) return -ENODATA; - temp = tjmax - ((low >> 16) & 0x7f); + *temp = tjmax - ((low >> 16) & 0x7f); - /* Do not allow negative CPU temperature */ - return temp >= 0 ? temp : -ENODATA; + return 0; } EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC); diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 11a7f8108bbbfeb5a92e054f203b242c340ebaf4..61c3d450ee605ae429e111f98b4b35516bf4d835 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -108,11 +108,11 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu) static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) { struct zone_device *zonedev = thermal_zone_device_priv(tzd); - int val; + int val, ret; - val = intel_tcc_get_temp(zonedev->cpu, true); - if (val < 0) - return val; + ret = intel_tcc_get_temp(zonedev->cpu, &val, true); + if (ret < 0) + return ret; *temp = val * 1000; pr_debug("sys_get_curr_temp %d\n", *temp); diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c index 8b0edb2048443d9e7f05dd09977e447b4b594a20..9ee2e7283435acfcbb1a956303b6122a08affecc 100644 --- a/drivers/thermal/mediatek/auxadc_thermal.c +++ b/drivers/thermal/mediatek/auxadc_thermal.c @@ -690,6 +690,9 @@ static const struct mtk_thermal_data mt7986_thermal_data = { .adcpnp = mt7986_adcpnp, .sensor_mux_values = mt7986_mux_values, .version = MTK_THERMAL_V3, + .apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1, + .apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3), + .apmixed_buffer_ctl_set = BIT(0), }; static bool mtk_thermal_temp_is_valid(int temp) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 141627370aabc334936aa560f4881ec258005fbc..a17803da83f8cd187082e571415b087511b275a6 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1337,9 +1337,6 @@ static void autoconfig_irq(struct uart_8250_port *up) inb_p(ICP); } - if (uart_console(port)) - console_lock(); - /* forget possible initially masked and pending IRQ */ probe_irq_off(probe_irq_on()); save_mcr = serial8250_in_MCR(up); @@ -1379,9 +1376,6 @@ static void autoconfig_irq(struct uart_8250_port *up) if (port->flags & UPF_FOURPORT) outb_p(save_ICP, ICP); - if (uart_console(port)) - console_unlock(); - port->irq = (irq > 0) ? 
irq : 0; } diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 4814aa310dcc5a1c3ff2163a3d43b2191bfd5da3..e339abff926d324457cf435abac2fc72d6834c94 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1635,13 +1635,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr, static int max310x_i2c_probe(struct i2c_client *client) { - const struct max310x_devtype *devtype = - device_get_match_data(&client->dev); + const struct max310x_devtype *devtype; struct i2c_client *port_client; struct regmap *regmaps[4]; unsigned int i; u8 port_addr; + devtype = device_get_match_data(&client->dev); + if (!devtype) + return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n"); + if (client->addr < devtype->slave_addr.min || client->addr > devtype->slave_addr.max) return dev_err_probe(&client->dev, -EINVAL, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 083ea4de48f9a32093495db64ce5dde3b6177a72..4c81210ad9b3a5e2114139b6b28dfdf767f7329a 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2602,7 +2602,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, port->type = PORT_UNKNOWN; flags |= UART_CONFIG_TYPE; } + /* Synchronize with possible boot console. */ + if (uart_console(port)) + console_lock(); port->ops->config_port(port, flags); + if (uart_console(port)) + console_unlock(); } if (port->type != PORT_UNKNOWN) { @@ -2610,6 +2615,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, uart_report_port(drv, port); + /* Synchronize with possible boot console. */ + if (uart_console(port)) + console_lock(); + /* Power up port for set_mctrl() */ uart_change_pm(state, UART_PM_STATE_ON); @@ -2626,6 +2635,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, uart_rs485_config(port); + if (uart_console(port)) + console_unlock(); + /* * If this driver supports console, and it hasn't been * successfully registered yet, try to re-register it. 
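Editor's note: taken together, the 8250_port.c and serial_core.c hunks above move the console locking out of the 8250 IRQ autoprobe and into uart_configure_port(), so that any driver's config_port() and the subsequent power-up/set_mctrl()/rs485 setup are serialized against a boot console that may still be printing through the same port. Condensed from the hunk (simplified, surrounding error handling omitted):

        /* Synchronize with a possible boot console around hardware probing. */
        if (uart_console(port))
                console_lock();

        port->ops->config_port(port, flags);

        if (uart_console(port))
                console_unlock();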
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c index 90a587bc29b74e533212f41e1a25fcda9655d68f..ea6e29091c0c9af61e7e6e626189e32f55a527fc 100644 --- a/drivers/usb/dwc3/dwc3-am62.c +++ b/drivers/usb/dwc3/dwc3-am62.c @@ -267,21 +267,15 @@ static int dwc3_ti_probe(struct platform_device *pdev) return ret; } -static int dwc3_ti_remove_core(struct device *dev, void *c) -{ - struct platform_device *pdev = to_platform_device(dev); - - platform_device_unregister(pdev); - return 0; -} - static void dwc3_ti_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dwc3_am62 *am62 = platform_get_drvdata(pdev); u32 reg; - device_for_each_child(dev, NULL, dwc3_ti_remove_core); + pm_runtime_get_sync(dev); + device_init_wakeup(dev, false); + of_platform_depopulate(dev); /* Clear mode valid bit */ reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL); @@ -289,7 +283,6 @@ static void dwc3_ti_remove(struct platform_device *pdev) dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg); pm_runtime_put_sync(dev); - clk_disable_unprepare(am62->usb2_refclk); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); } diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index cb85168fd00c2895a70066d998169320524ccbe0..7aa46d426f31b26a1ee4e650f28871db1e3d4de4 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc) static int tegra_xudc_phy_get(struct tegra_xudc *xudc) { - int err = 0, usb3; - unsigned int i; + int err = 0, usb3_companion_port; + unsigned int i, j; xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys, sizeof(*xudc->utmi_phy), GFP_KERNEL); @@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) if (IS_ERR(xudc->utmi_phy[i])) { err = PTR_ERR(xudc->utmi_phy[i]); dev_err_probe(xudc->dev, err, - "failed to get usb2-%d PHY\n", i); + "failed to get PHY for phy-name usb2-%d\n", i); goto clean_up; } else if (xudc->utmi_phy[i]) { /* Get usb-phy, if utmi phy is available */ @@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) } /* Get USB3 phy */ - usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); - if (usb3 < 0) + usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i); + if (usb3_companion_port < 0) continue; - snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3); - xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); - if (IS_ERR(xudc->usb3_phy[i])) { - err = PTR_ERR(xudc->usb3_phy[i]); - dev_err_probe(xudc->dev, err, - "failed to get usb3-%d PHY\n", usb3); - goto clean_up; - } else if (xudc->usb3_phy[i]) - dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3); + for (j = 0; j < xudc->soc->num_phys; j++) { + snprintf(phy_name, sizeof(phy_name), "usb3-%d", j); + xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name); + if (IS_ERR(xudc->usb3_phy[i])) { + err = PTR_ERR(xudc->usb3_phy[i]); + dev_err_probe(xudc->dev, err, + "failed to get PHY for phy-name usb3-%d\n", j); + goto clean_up; + } else if (xudc->usb3_phy[i]) { + int usb2_port = + tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]); + int usb3_port = + tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]); + if (usb3_port == usb3_companion_port) { + dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n", + usb2_port, usb3_port, i); + break; + } + } + } } return err; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 
132b76fa7ca6077da45e2bc35da11470888a6ecf..c4c733d724bd8b6e0ce2de4bcb71459dd5333c45 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1178,6 +1178,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb) temp = kzalloc_node(buf_len, GFP_ATOMIC, dev_to_node(hcd->self.sysdev)); + if (!temp) + return -ENOMEM; if (usb_urb_dir_out(urb)) sg_pcopy_to_buffer(urb->sg, urb->num_sgs, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 923e0ed85444be9fde31e0b0d965813fc99c5acf..21fd26609252bea34ec95983daf5a3073d8827d2 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ + { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */ + { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ @@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ + { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */ @@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ + { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 13a56783830df2503edf24eba00436b9a03c4bcf..22d01a0f10fbc215f3fb07ef7e0478b15162e5e7 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + /* GMC devices */ + { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 21a2b5a25fc09732800830401546626d91074ffa..5ee60ba2a73cdbeaa117a2f81ad46c20c837d8c7 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1606,3 +1606,9 @@ #define UBLOX_VID 0x1546 #define UBLOX_C099F9P_ZED_PID 0x0502 #define UBLOX_C099F9P_ODIN_PID 0x0503 + +/* + * GMC devices + */ +#define GMC_VID 0x1cd7 +#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 
2ae124c49d448f63b6d6a3078ad08fffee3ad2d0..55a65d941ccbfb1161d363ac72881ce0f490a8df 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb); /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ #define LUAT_PRODUCT_AIR720U 0x4e00 +/* MeiG Smart Technology products */ +#define MEIGSMART_VENDOR_ID 0x2dee +/* MeiG Smart SLM320 based on UNISOC UIS8910 */ +#define MEIGSMART_PRODUCT_SLM320 0x4d41 + /* Device flags */ /* Highest interface number which can be used with NCTRL() and RSVD() */ @@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 474315a72c7707ab5ae96a2664ada020a906d4b0..13ec976b1c7479d1d652152aab68bc41c8ddcbc7 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -221,12 +221,12 @@ struct ucsi_cable_property { #define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0) #define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1) #define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2) -#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0)) +#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3) #define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0 #define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1 #define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2 #define UCSI_CABLE_PROPERTY_PLUG_OTHER 3 -#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5) +#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5) u8 latency; } __packed; diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c index c51229fccbd6a8f24600ac95a9f93ab6da5b4aee..1a1d0d5ec35c2b45379ecb750c26427f5ec31704 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c @@ -141,13 +141,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, irq = &vdev->mc_irqs[index]; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_fsl_mc_irq_handler(hwirq, irq); + if (irq->trigger) + eventfd_signal(irq->trigger, 1); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { u8 trigger = *(u8 *)data; - if (trigger) - vfio_fsl_mc_irq_handler(hwirq, irq); + if (trigger && irq->trigger) + eventfd_signal(irq->trigger, 1); } return 0; diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c index 4c351c59d05a939097d969b7e3da69325dc839b0..a286ebcc71126224c9e3e1fd7c9ad6c6c3bd2cbd 100644 --- a/drivers/vfio/pci/pds/vfio_dev.c +++ b/drivers/vfio/pci/pds/vfio_dev.c @@ -32,9 +32,9 @@ void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio) mutex_lock(&pds_vfio->reset_mutex); if (pds_vfio->deferred_reset) { pds_vfio->deferred_reset = false; + pds_vfio_put_restore_file(pds_vfio); + pds_vfio_put_save_file(pds_vfio); if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) { - pds_vfio_put_restore_file(pds_vfio); - pds_vfio_put_save_file(pds_vfio); pds_vfio_dirty_disable(pds_vfio, false); } pds_vfio->state = pds_vfio->deferred_reset_state; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 
cbb4bcbfbf83d98ec3e9c3a44a120c9fb74623f2..99bbd647e5d817b93cdef84e4424abe765c5c4a5 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -90,22 +90,28 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused) if (likely(is_intx(vdev) && !vdev->virq_disabled)) { struct vfio_pci_irq_ctx *ctx; + struct eventfd_ctx *trigger; ctx = vfio_irq_ctx_get(vdev, 0); if (WARN_ON_ONCE(!ctx)) return; - eventfd_signal(ctx->trigger, 1); + + trigger = READ_ONCE(ctx->trigger); + if (likely(trigger)) + eventfd_signal(trigger, 1); } } /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */ -bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) +static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; struct vfio_pci_irq_ctx *ctx; unsigned long flags; bool masked_changed = false; + lockdep_assert_held(&vdev->igate); + spin_lock_irqsave(&vdev->irqlock, flags); /* @@ -143,6 +149,17 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) return masked_changed; } +bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) +{ + bool mask_changed; + + mutex_lock(&vdev->igate); + mask_changed = __vfio_pci_intx_mask(vdev); + mutex_unlock(&vdev->igate); + + return mask_changed; +} + /* * If this is triggered by an eventfd, we can't call eventfd_signal * or else we'll deadlock on the eventfd wait queue. Return >0 when @@ -194,12 +211,21 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused) return ret; } -void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) +static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) { + lockdep_assert_held(&vdev->igate); + if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0) vfio_send_intx_eventfd(vdev, NULL); } +void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev) +{ + mutex_lock(&vdev->igate); + __vfio_pci_intx_unmask(vdev); + mutex_unlock(&vdev->igate); +} + static irqreturn_t vfio_intx_handler(int irq, void *dev_id) { struct vfio_pci_core_device *vdev = dev_id; @@ -231,97 +257,100 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id) return ret; } -static int vfio_intx_enable(struct vfio_pci_core_device *vdev) +static int vfio_intx_enable(struct vfio_pci_core_device *vdev, + struct eventfd_ctx *trigger) { + struct pci_dev *pdev = vdev->pdev; struct vfio_pci_irq_ctx *ctx; + unsigned long irqflags; + char *name; + int ret; if (!is_irq_none(vdev)) return -EINVAL; - if (!vdev->pdev->irq) + if (!pdev->irq) return -ENODEV; + name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); + if (!name) + return -ENOMEM; + ctx = vfio_irq_ctx_alloc(vdev, 0); if (!ctx) return -ENOMEM; + ctx->name = name; + ctx->trigger = trigger; + /* - * If the virtual interrupt is masked, restore it. Devices - * supporting DisINTx can be masked at the hardware level - * here, non-PCI-2.3 devices will have to wait until the - * interrupt is enabled. + * Fill the initial masked state based on virq_disabled. After + * enable, changing the DisINTx bit in vconfig directly changes INTx + * masking. igate prevents races during setup, once running masked + * is protected via irqlock. + * + * Devices supporting DisINTx also reflect the current mask state in + * the physical DisINTx bit, which is not affected during IRQ setup. + * + * Devices without DisINTx support require an exclusive interrupt. + * IRQ masking is performed at the IRQ chip. 
Again, igate protects + * against races during setup and IRQ handlers and irqfds are not + * yet active, therefore masked is stable and can be used to + * conditionally auto-enable the IRQ. + * + * irq_type must be stable while the IRQ handler is registered, + * therefore it must be set before request_irq(). */ ctx->masked = vdev->virq_disabled; - if (vdev->pci_2_3) - pci_intx(vdev->pdev, !ctx->masked); + if (vdev->pci_2_3) { + pci_intx(pdev, !ctx->masked); + irqflags = IRQF_SHARED; + } else { + irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0; + } vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX; + ret = request_irq(pdev->irq, vfio_intx_handler, + irqflags, ctx->name, vdev); + if (ret) { + vdev->irq_type = VFIO_PCI_NUM_IRQS; + kfree(name); + vfio_irq_ctx_free(vdev, ctx, 0); + return ret; + } + return 0; } -static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd) +static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, + struct eventfd_ctx *trigger) { struct pci_dev *pdev = vdev->pdev; - unsigned long irqflags = IRQF_SHARED; struct vfio_pci_irq_ctx *ctx; - struct eventfd_ctx *trigger; - unsigned long flags; - int ret; + struct eventfd_ctx *old; ctx = vfio_irq_ctx_get(vdev, 0); if (WARN_ON_ONCE(!ctx)) return -EINVAL; - if (ctx->trigger) { - free_irq(pdev->irq, vdev); - kfree(ctx->name); - eventfd_ctx_put(ctx->trigger); - ctx->trigger = NULL; - } - - if (fd < 0) /* Disable only */ - return 0; + old = ctx->trigger; - ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", - pci_name(pdev)); - if (!ctx->name) - return -ENOMEM; + WRITE_ONCE(ctx->trigger, trigger); - trigger = eventfd_ctx_fdget(fd); - if (IS_ERR(trigger)) { - kfree(ctx->name); - return PTR_ERR(trigger); + /* Releasing an old ctx requires synchronizing in-flight users */ + if (old) { + synchronize_irq(pdev->irq); + vfio_virqfd_flush_thread(&ctx->unmask); + eventfd_ctx_put(old); } - ctx->trigger = trigger; - - if (!vdev->pci_2_3) - irqflags = 0; - - ret = request_irq(pdev->irq, vfio_intx_handler, - irqflags, ctx->name, vdev); - if (ret) { - ctx->trigger = NULL; - kfree(ctx->name); - eventfd_ctx_put(trigger); - return ret; - } - - /* - * INTx disable will stick across the new irq setup, - * disable_irq won't. 
- */ - spin_lock_irqsave(&vdev->irqlock, flags); - if (!vdev->pci_2_3 && ctx->masked) - disable_irq_nosync(pdev->irq); - spin_unlock_irqrestore(&vdev->irqlock, flags); - return 0; } static void vfio_intx_disable(struct vfio_pci_core_device *vdev) { + struct pci_dev *pdev = vdev->pdev; struct vfio_pci_irq_ctx *ctx; ctx = vfio_irq_ctx_get(vdev, 0); @@ -329,10 +358,13 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev) if (ctx) { vfio_virqfd_disable(&ctx->unmask); vfio_virqfd_disable(&ctx->mask); + free_irq(pdev->irq, vdev); + if (ctx->trigger) + eventfd_ctx_put(ctx->trigger); + kfree(ctx->name); + vfio_irq_ctx_free(vdev, ctx, 0); } - vfio_intx_set_signal(vdev, -1); vdev->irq_type = VFIO_PCI_NUM_IRQS; - vfio_irq_ctx_free(vdev, ctx, 0); } /* @@ -560,11 +592,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_pci_intx_unmask(vdev); + __vfio_pci_intx_unmask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t unmask = *(uint8_t *)data; if (unmask) - vfio_pci_intx_unmask(vdev); + __vfio_pci_intx_unmask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0); int32_t fd = *(int32_t *)data; @@ -591,11 +623,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_NONE) { - vfio_pci_intx_mask(vdev); + __vfio_pci_intx_mask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t mask = *(uint8_t *)data; if (mask) - vfio_pci_intx_mask(vdev); + __vfio_pci_intx_mask(vdev); } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { return -ENOTTY; /* XXX implement me */ } @@ -616,19 +648,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev, return -EINVAL; if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + struct eventfd_ctx *trigger = NULL; int32_t fd = *(int32_t *)data; int ret; - if (is_intx(vdev)) - return vfio_intx_set_signal(vdev, fd); + if (fd >= 0) { + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) + return PTR_ERR(trigger); + } - ret = vfio_intx_enable(vdev); - if (ret) - return ret; + if (is_intx(vdev)) + ret = vfio_intx_set_signal(vdev, trigger); + else + ret = vfio_intx_enable(vdev, trigger); - ret = vfio_intx_set_signal(vdev, fd); - if (ret) - vfio_intx_disable(vdev); + if (ret && trigger) + eventfd_ctx_put(trigger); return ret; } diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c index 665197caed89e41ad042c87a897c411047561602..31636d1414a0492834bfa64e151ecb47050c2825 100644 --- a/drivers/vfio/platform/vfio_platform_irq.c +++ b/drivers/vfio/platform/vfio_platform_irq.c @@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev, return 0; } +/* + * The trigger eventfd is guaranteed valid in the interrupt path + * and protected by the igate mutex when triggered via ioctl. 
+ */ +static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx) +{ + if (likely(irq_ctx->trigger)) + eventfd_signal(irq_ctx->trigger, 1); +} + static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) { struct vfio_platform_irq *irq_ctx = dev_id; @@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) spin_unlock_irqrestore(&irq_ctx->lock, flags); if (ret == IRQ_HANDLED) - eventfd_signal(irq_ctx->trigger, 1); + vfio_send_eventfd(irq_ctx); return ret; } @@ -164,52 +174,40 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id) { struct vfio_platform_irq *irq_ctx = dev_id; - eventfd_signal(irq_ctx->trigger, 1); + vfio_send_eventfd(irq_ctx); return IRQ_HANDLED; } static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, - int fd, irq_handler_t handler) + int fd) { struct vfio_platform_irq *irq = &vdev->irqs[index]; struct eventfd_ctx *trigger; - int ret; if (irq->trigger) { - irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN); - free_irq(irq->hwirq, irq); - kfree(irq->name); + disable_irq(irq->hwirq); eventfd_ctx_put(irq->trigger); irq->trigger = NULL; } if (fd < 0) /* Disable only */ return 0; - irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)", - irq->hwirq, vdev->name); - if (!irq->name) - return -ENOMEM; trigger = eventfd_ctx_fdget(fd); - if (IS_ERR(trigger)) { - kfree(irq->name); + if (IS_ERR(trigger)) return PTR_ERR(trigger); - } irq->trigger = trigger; - irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN); - ret = request_irq(irq->hwirq, handler, 0, irq->name, irq); - if (ret) { - kfree(irq->name); - eventfd_ctx_put(trigger); - irq->trigger = NULL; - return ret; - } - - if (!irq->masked) - enable_irq(irq->hwirq); + /* + * irq->masked effectively provides nested disables within the overall + * enable relative to trigger. Specifically request_irq() is called + * with NO_AUTOEN, therefore the IRQ is initially disabled. The user + * may only further disable the IRQ with a MASK operations because + * irq->masked is initially false. + */ + enable_irq(irq->hwirq); return 0; } @@ -228,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, handler = vfio_irq_handler; if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) - return vfio_set_trigger(vdev, index, -1, handler); + return vfio_set_trigger(vdev, index, -1); if (start != 0 || count != 1) return -EINVAL; @@ -236,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { int32_t fd = *(int32_t *)data; - return vfio_set_trigger(vdev, index, fd, handler); + return vfio_set_trigger(vdev, index, fd); } if (flags & VFIO_IRQ_SET_DATA_NONE) { @@ -260,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, unsigned start, unsigned count, uint32_t flags, void *data) = NULL; + /* + * For compatibility, errors from request_irq() are local to the + * SET_IRQS path and reflected in the name pointer. This allows, + * for example, polling mode fallback for an exclusive IRQ failure. 
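Editor's note: the vfio_platform_irq.c hunks that follow request every IRQ once at init time with IRQF_NO_AUTOEN and, when request_irq() fails, stash the error in the per-IRQ name pointer via ERR_PTR(), so the failure is only reported from the SET_IRQS ioctl and userspace can still fall back to, for example, polling mode. A minimal sketch of that deferred-error idiom (hypothetical structure, not the actual vfio code):

        #include <linux/err.h>
        #include <linux/interrupt.h>
        #include <linux/slab.h>

        struct my_irq {
                char *name;             /* valid string, or ERR_PTR() from a failed request */
                unsigned int hwirq;
        };

        static void my_irq_init(struct my_irq *mi, irq_handler_t handler)
        {
                int ret = request_irq(mi->hwirq, handler, IRQF_NO_AUTOEN,
                                      mi->name, mi);
                if (ret) {
                        kfree(mi->name);
                        mi->name = ERR_PTR(ret);        /* remember why it failed */
                }
        }

        static int my_irq_set_irqs(struct my_irq *mi)
        {
                if (IS_ERR(mi->name))
                        return PTR_ERR(mi->name);       /* surface the init-time error */
                /* ... normal SET_IRQS handling ... */
                return 0;
        }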
+ */ + if (IS_ERR(vdev->irqs[index].name)) + return PTR_ERR(vdev->irqs[index].name); + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_MASK: func = vfio_platform_set_irq_mask; @@ -280,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, int vfio_platform_irq_init(struct vfio_platform_device *vdev) { - int cnt = 0, i; + int cnt = 0, i, ret = 0; while (vdev->get_irq(vdev, cnt) >= 0) cnt++; @@ -292,37 +298,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev) for (i = 0; i < cnt; i++) { int hwirq = vdev->get_irq(vdev, i); + irq_handler_t handler = vfio_irq_handler; - if (hwirq < 0) + if (hwirq < 0) { + ret = -EINVAL; goto err; + } spin_lock_init(&vdev->irqs[i].lock); vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD; - if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) + if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) { vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED; + handler = vfio_automasked_irq_handler; + } vdev->irqs[i].count = 1; vdev->irqs[i].hwirq = hwirq; vdev->irqs[i].masked = false; + vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT, + "vfio-irq[%d](%s)", hwirq, + vdev->name); + if (!vdev->irqs[i].name) { + ret = -ENOMEM; + goto err; + } + + ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN, + vdev->irqs[i].name, &vdev->irqs[i]); + if (ret) { + kfree(vdev->irqs[i].name); + vdev->irqs[i].name = ERR_PTR(ret); + } } vdev->num_irqs = cnt; return 0; err: + for (--i; i >= 0; i--) { + if (!IS_ERR(vdev->irqs[i].name)) { + free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); + kfree(vdev->irqs[i].name); + } + } kfree(vdev->irqs); - return -EINVAL; + return ret; } void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) { int i; - for (i = 0; i < vdev->num_irqs; i++) - vfio_set_trigger(vdev, i, -1, NULL); + for (i = 0; i < vdev->num_irqs; i++) { + vfio_virqfd_disable(&vdev->irqs[i].mask); + vfio_virqfd_disable(&vdev->irqs[i].unmask); + if (!IS_ERR(vdev->irqs[i].name)) { + free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); + if (vdev->irqs[i].trigger) + eventfd_ctx_put(vdev->irqs[i].trigger); + kfree(vdev->irqs[i].name); + } + } vdev->num_irqs = 0; kfree(vdev->irqs); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 9acdd0f91a5ae4874db1d48095450e1739a28baf..312c7f8853866eebefceb3a211666f3d49098000 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2833,11 +2833,6 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) ctx.roots = NULL; } - /* Free the reserved data space */ - btrfs_qgroup_free_refroot(fs_info, - record->data_rsv_refroot, - record->data_rsv, - BTRFS_QGROUP_RSV_DATA); /* * Use BTRFS_SEQ_LAST as time_seq to do special search, * which doesn't lock tree or delayed_refs and search @@ -2861,6 +2856,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) record->old_roots = NULL; new_roots = NULL; } + /* Free the reserved data space */ + btrfs_qgroup_free_refroot(fs_info, + record->data_rsv_refroot, + record->data_rsv, + BTRFS_QGROUP_RSV_DATA); cleanup: ulist_free(record->old_roots); ulist_free(new_roots); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 722a1dde75636970445e831c3d25e793f561114c..36adbb3d096a9d4425e0797fac28ae00765a19cc 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1432,7 +1432,7 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start, if (in_range(physical_start, *start, len) || in_range(*start, physical_start, - physical_end - physical_start)) { + physical_end + 1 - 
physical_start)) { *start = physical_end + 1; return true; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index ea5ac2636632beebde7a7a6211b5e4b22a89c334..257b70c5ded45b8ab442c027c43c3a0f7bddb9f2 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -5184,10 +5184,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) .fe_len = ac->ac_orig_goal_len, }; loff_t orig_goal_end = extent_logical_end(sbi, &ex); + loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); - /* we can't allocate as much as normalizer wants. - * so, found space must get proper lstart - * to cover original request */ + /* + * We can't allocate as much as normalizer wants, so we try + * to get proper lstart to cover the original request, except + * when the goal doesn't cover the original request as below: + * + * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048 + * best_ex:0/200(200) -> adjusted: 1848/2048(200) + */ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); @@ -5199,7 +5205,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) * 1. Check if best ex can be kept at end of goal (before * cr_best_avail trimmed it) and still cover original start * 2. Else, check if best ex can be kept at start of goal and - * still cover original start + * still cover original end * 3. Else, keep the best ex at start of original request. */ ex.fe_len = ac->ac_b_ex.fe_len; @@ -5209,7 +5215,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) goto adjust_bex; ex.fe_logical = ac->ac_g_ex.fe_logical; - if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex)) + if (o_ex_end <= extent_logical_end(sbi, &ex)) goto adjust_bex; ex.fe_logical = ac->ac_o_ex.fe_logical; @@ -5217,7 +5223,6 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) ac->ac_b_ex.fe_logical = ex.fe_logical; BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); - BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end); } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index be280268da9fedf015dbc3885b97bd7e5cb33722..5f105171df7b56323cce8ca9d48275b8d6a602fc 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1605,7 +1605,8 @@ static int ext4_flex_group_add(struct super_block *sb, int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); int gdb_num_end = ((group + flex_gd->count - 1) / EXT4_DESC_PER_BLOCK(sb)); - int meta_bg = ext4_has_feature_meta_bg(sb); + int meta_bg = ext4_has_feature_meta_bg(sb) && + gdb_num >= le32_to_cpu(es->s_first_meta_bg); sector_t padding_blocks = meta_bg ? 
0 : sbi->s_sbh->b_blocknr - ext4_group_first_block_no(sb, 0); sector_t old_gdb = 0; diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c index 3626eb585a983e9d1a639df5da5e56bf9f19a118..93c97bf45b0614b4d4743eb4e85132fbd0a70ec7 100644 --- a/fs/fat/nfs.c +++ b/fs/fat/nfs.c @@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp, fid->parent_i_gen = parent->i_generation; type = FILEID_FAT_WITH_PARENT; *lenp = FAT_FID_SIZE_WITH_PARENT; + } else { + /* + * We need to initialize this field because the fh is actually + * 12 bytes long + */ + fid->parent_i_pos_hi = 0; } return type; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index d707e6987da91eaaf06eda157f8487d9488dd12b..a8a7fc0e1754776e0b86bc64c2dc3d8bae2d059b 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -391,6 +391,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name err = -EIO; if (fuse_invalid_attr(&outarg->attr)) goto out_put_forget; + if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) { + pr_warn_once("root generation should be zero\n"); + outarg->generation = 0; + } *inode = fuse_iget(sb, outarg->nodeid, outarg->generation, &outarg->attr, ATTR_TIMEOUT(outarg), @@ -1210,7 +1214,7 @@ static int fuse_do_statx(struct inode *inode, struct file *file, if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) || ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) || inode_wrong_type(inode, sx->mode)))) { - make_bad_inode(inode); + fuse_make_bad(inode); return -EIO; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a660f1f21540abbb3dd60e876ca81a587af3336e..cc9651a01351c25c5bb21afc7698d3f7f8516668 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2467,7 +2467,8 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) return fuse_dax_mmap(file, vma); if (ff->open_flags & FOPEN_DIRECT_IO) { - /* Can't provide the coherency needed for MAP_SHARED + /* + * Can't provide the coherency needed for MAP_SHARED * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set. 
*/ if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap) @@ -2475,7 +2476,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) invalidate_inode_pages2(file->f_mapping); - return generic_file_mmap(file, vma); + if (!(vma->vm_flags & VM_MAYSHARE)) { + /* MAP_PRIVATE */ + return generic_file_mmap(file, vma); + } } if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 9377c46f14c4a8cdf65b90636afa4525582cfaf4..3e65cdc9463163599070486362659accb3f7ca21 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -939,7 +939,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation, static inline void fuse_make_bad(struct inode *inode) { - remove_inode_hash(inode); set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 59743813563e5f6b7cfea3df696c442977b1dd88..23ab31b967a1339a6251febee1401ec9ba212d1a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -472,8 +472,11 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, } else if (fuse_stale_inode(inode, generation, attr)) { /* nodeid was reused, any I/O on the old inode should fail */ fuse_make_bad(inode); - iput(inode); - goto retry; + if (inode != d_inode(sb->s_root)) { + remove_inode_hash(inode); + iput(inode); + goto retry; + } } fi = get_fuse_inode(inode); spin_lock(&fi->lock); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 5918c67dae0da9a913983e3cc0207693562c6202..b6f801e73bfdcc55e25193daedf7154bfeb77dcc 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -668,10 +668,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) LIST_HEAD(mds_list); nfs_init_cinfo_from_dreq(&cinfo, dreq); + nfs_commit_begin(cinfo.mds); nfs_scan_commit(dreq->inode, &mds_list, &cinfo); res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo); - if (res < 0) /* res == -ENOMEM */ - nfs_direct_write_reschedule(dreq); + if (res < 0) { /* res == -ENOMEM */ + spin_lock(&dreq->lock); + if (dreq->flags == 0) + dreq->flags = NFS_ODIRECT_RESCHED_WRITES; + spin_unlock(&dreq->lock); + } + if (nfs_commit_end(cinfo.mds)) + nfs_direct_write_complete(dreq); } static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 7dc21a48e3e7b6a0b86f06148163450e79564f82..a142287d86f68ed411dce6f9c410e41948c570b2 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -305,6 +305,8 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio, new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len); if (IS_ERR(new)) { error = PTR_ERR(new); + if (nfs_netfs_folio_unlock(folio)) + folio_unlock(folio); goto out; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 4a250f65fa7594216b4ef9cd69ad2e2a7f9b8581..7d03811f44a4bb6b65c726403ebbbeef08dfade4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1661,7 +1661,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo) !atomic_read(&cinfo->rpcs_out)); } -static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) +void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) { atomic_inc(&cinfo->rpcs_out); } diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c index 3230ed7eaddec9b30bd8ffad90c303a1de5cc2e4..7206167f4184a92026db828458b6f0e0e67e673e 100644 --- a/fs/smb/client/cifs_debug.c +++ b/fs/smb/client/cifs_debug.c @@ -486,6 +486,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) ses->ses_count, ses->serverOS, ses->serverNOS, 
ses->capabilities, ses->ses_status); } + if (ses->expired_pwd) + seq_puts(m, "password no longer valid "); spin_unlock(&ses->ses_lock); seq_printf(m, "\n\tSecurity type: %s ", diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 462554917e5a10cfe8fafc82889fbc475245cb64..91a4061233f1adeb59007332a86ee30665d77070 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1052,6 +1052,7 @@ struct cifs_ses { enum securityEnum sectype; /* what security flavor was specified? */ bool sign; /* is signing required? */ bool domainAuto:1; + bool expired_pwd; /* track if access denied or expired pwd so can know if need to update */ unsigned int flags; __u16 session_flags; __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; @@ -1562,6 +1563,7 @@ struct cifsInodeInfo { spinlock_t deferred_lock; /* protection on deferred list */ bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */ char *symlink_target; + __u32 reparse_tag; }; static inline struct cifsInodeInfo * diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 260a6299bddb8d55ffd95befc34d0435ed2666cf..0cff4f5af179317f088ef9a0f4484cb68fdb5582 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -144,7 +144,8 @@ extern int cifs_reconnect(struct TCP_Server_Info *server, extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr); extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *); extern bool backup_cred(struct cifs_sb_info *); -extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); +extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof, + bool from_readdir); extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, unsigned int bytes_written); extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int); @@ -201,7 +202,8 @@ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb); extern void cifs_dir_info_to_fattr(struct cifs_fattr *, FILE_DIRECTORY_INFO *, struct cifs_sb_info *); -extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr); +extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, + bool from_readdir); extern struct inode *cifs_iget(struct super_block *sb, struct cifs_fattr *fattr); diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index c156460eb558718da6f2cba03e45f670b144f4b7..c711d5eb2987e9925a12361b864a818efe8207da 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -329,7 +329,7 @@ int cifs_posix_open(const char *full_path, struct inode **pinode, } } else { cifs_revalidate_mapping(*pinode); - rc = cifs_fattr_to_inode(*pinode, &fattr); + rc = cifs_fattr_to_inode(*pinode, &fattr, false); } posix_open_ret: @@ -4766,12 +4766,14 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode) refreshing the inode only on increases in the file size but this is tricky to do without racing with writebehind page caching in the current Linux kernel design */ -bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) +bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file, + bool from_readdir) { if (!cifsInode) return true; - if (is_inode_writable(cifsInode)) { + if (is_inode_writable(cifsInode) || + ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) { /* This inode is open for write at least once */ struct cifs_sb_info *cifs_sb; diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c 
index 6ecbf48d0f0c6281b9fa1c7bde9d058d40c41b6c..e4a6b240d2263961de5ee9bf044d354d8d5a916f 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -771,7 +771,7 @@ static void smb3_fs_context_free(struct fs_context *fc) */ static int smb3_verify_reconfigure_ctx(struct fs_context *fc, struct smb3_fs_context *new_ctx, - struct smb3_fs_context *old_ctx) + struct smb3_fs_context *old_ctx, bool need_recon) { if (new_ctx->posix_paths != old_ctx->posix_paths) { cifs_errorf(fc, "can not change posixpaths during remount\n"); @@ -797,8 +797,15 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc, } if (new_ctx->password && (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) { - cifs_errorf(fc, "can not change password during remount\n"); - return -EINVAL; + if (need_recon == false) { + cifs_errorf(fc, + "can not change password of active session during remount\n"); + return -EINVAL; + } else if (old_ctx->sectype == Kerberos) { + cifs_errorf(fc, + "can not change password for Kerberos via remount\n"); + return -EINVAL; + } } if (new_ctx->domainname && (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) { @@ -842,9 +849,14 @@ static int smb3_reconfigure(struct fs_context *fc) struct smb3_fs_context *ctx = smb3_fc2context(fc); struct dentry *root = fc->root; struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); + struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses; + bool need_recon = false; int rc; - rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx); + if (ses->expired_pwd) + need_recon = true; + + rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx, need_recon); if (rc) return rc; @@ -857,7 +869,12 @@ static int smb3_reconfigure(struct fs_context *fc) STEAL_STRING(cifs_sb, ctx, UNC); STEAL_STRING(cifs_sb, ctx, source); STEAL_STRING(cifs_sb, ctx, username); - STEAL_STRING_SENSITIVE(cifs_sb, ctx, password); + if (need_recon == false) + STEAL_STRING_SENSITIVE(cifs_sb, ctx, password); + else { + kfree_sensitive(ses->password); + ses->password = kstrdup(ctx->password, GFP_KERNEL); + } STEAL_STRING(cifs_sb, ctx, domainname); STEAL_STRING(cifs_sb, ctx, nodename); STEAL_STRING(cifs_sb, ctx, iocharset); diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index eb54e48937771a9d19b9dac638741b5297356694..cb9e719e67ae29107544746fe592405a0f1307e3 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -147,7 +147,8 @@ cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) /* populate an inode with info from a cifs_fattr struct */ int -cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) +cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr, + bool from_readdir) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); @@ -182,6 +183,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) inode->i_mode = fattr->cf_mode; cifs_i->cifsAttrs = fattr->cf_cifsattrs; + cifs_i->reparse_tag = fattr->cf_cifstag; if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) cifs_i->time = 0; @@ -198,7 +200,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) * Can't safely change the file size here if the client is writing to * it due to potential races. 
*/ - if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { + if (is_size_safe_to_change(cifs_i, fattr->cf_eof, from_readdir)) { i_size_write(inode, fattr->cf_eof); /* @@ -209,7 +211,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9; } - if (S_ISLNK(fattr->cf_mode)) { + if (S_ISLNK(fattr->cf_mode) && fattr->cf_symlink_target) { kfree(cifs_i->symlink_target); cifs_i->symlink_target = fattr->cf_symlink_target; fattr->cf_symlink_target = NULL; @@ -367,7 +369,7 @@ static int update_inode_info(struct super_block *sb, CIFS_I(*inode)->time = 0; /* force reval */ return -ESTALE; } - return cifs_fattr_to_inode(*inode, fattr); + return cifs_fattr_to_inode(*inode, fattr, false); } #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY @@ -402,7 +404,7 @@ cifs_get_file_info_unix(struct file *filp) } else goto cifs_gfiunix_out; - rc = cifs_fattr_to_inode(inode, &fattr); + rc = cifs_fattr_to_inode(inode, &fattr, false); cifs_gfiunix_out: free_xid(xid); @@ -927,7 +929,7 @@ cifs_get_file_info(struct file *filp) fattr.cf_uniqueid = CIFS_I(inode)->uniqueid; fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; /* if filetype is different, return error */ - rc = cifs_fattr_to_inode(inode, &fattr); + rc = cifs_fattr_to_inode(inode, &fattr, false); cgfi_exit: cifs_free_open_info(&data); free_xid(xid); @@ -1103,6 +1105,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data, cifs_open_info_to_fattr(fattr, data, sb); out: + fattr->cf_cifstag = data->reparse.tag; free_rsp_buf(rsp_buftype, rsp_iov.iov_base); return rc; } @@ -1465,7 +1468,7 @@ cifs_iget(struct super_block *sb, struct cifs_fattr *fattr) } /* can't fail - see cifs_find_inode() */ - cifs_fattr_to_inode(inode, fattr); + cifs_fattr_to_inode(inode, fattr, false); if (sb->s_flags & SB_NOATIME) inode->i_flags |= S_NOATIME | S_NOCMTIME; if (inode->i_state & I_NEW) { diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c index e23cd216bffbe04a70d132d963aaf15f7d15ac8b..56033e4e4bae9ede54cc2cbcf74802efe1b30597 100644 --- a/fs/smb/client/readdir.c +++ b/fs/smb/client/readdir.c @@ -55,6 +55,23 @@ static inline void dump_cifs_file_struct(struct file *file, char *label) } #endif /* DEBUG2 */ +/* + * Match a reparse point inode if reparse tag and ctime haven't changed. + * + * Windows Server updates ctime of reparse points when their data have changed. + * The server doesn't allow changing reparse tags from existing reparse points, + * though it's worth checking. + */ +static inline bool reparse_inode_match(struct inode *inode, + struct cifs_fattr *fattr) +{ + struct timespec64 ctime = inode_get_ctime(inode); + + return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) && + CIFS_I(inode)->reparse_tag == fattr->cf_cifstag && + timespec64_equal(&ctime, &fattr->cf_ctime); +} + /* * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT * @@ -71,6 +88,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, struct super_block *sb = parent->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + int rc; cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); @@ -82,9 +100,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, * We'll end up doing an on the wire call either way and * this spares us an invalidation. 
*/ - if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) - return; retry: + if ((fattr->cf_cifsattrs & ATTR_REPARSE) || + (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)) + return; + dentry = d_alloc_parallel(parent, name, &wq); } if (IS_ERR(dentry)) @@ -104,12 +124,34 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; - /* update inode in place - * if both i_ino and i_mode didn't change */ - if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid && - cifs_fattr_to_inode(inode, fattr) == 0) { - dput(dentry); - return; + /* + * Update inode in place if both i_ino and i_mode didn't + * change. + */ + if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { + /* + * Query dir responses don't provide enough + * information about reparse points other than + * their reparse tags. Save an invalidation by + * not clobbering the existing mode, size and + * symlink target (if any) when reparse tag and + * ctime haven't changed. + */ + rc = 0; + if (fattr->cf_cifsattrs & ATTR_REPARSE) { + if (likely(reparse_inode_match(inode, fattr))) { + fattr->cf_mode = inode->i_mode; + fattr->cf_eof = CIFS_I(inode)->server_eof; + fattr->cf_symlink_target = NULL; + } else { + CIFS_I(inode)->time = 0; + rc = -ESTALE; + } + } + if (!rc && !cifs_fattr_to_inode(inode, fattr, true)) { + dput(dentry); + return; + } } } d_invalidate(dentry); @@ -127,29 +169,6 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, dput(dentry); } -static bool reparse_file_needs_reval(const struct cifs_fattr *fattr) -{ - if (!(fattr->cf_cifsattrs & ATTR_REPARSE)) - return false; - /* - * The DFS tags should be only intepreted by server side as per - * MS-FSCC 2.1.2.1, but let's include them anyway. - * - * Besides, if cf_cifstag is unset (0), then we still need it to be - * revalidated to know exactly what reparse point it is. - */ - switch (fattr->cf_cifstag) { - case IO_REPARSE_TAG_DFS: - case IO_REPARSE_TAG_DFSR: - case IO_REPARSE_TAG_SYMLINK: - case IO_REPARSE_TAG_NFS: - case IO_REPARSE_TAG_MOUNT_POINT: - case 0: - return true; - } - return false; -} - static void cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) { @@ -181,14 +200,6 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb) } out_reparse: - /* - * We need to revalidate it further to make a decision about whether it - * is a symbolic link, DFS referral or a reparse point with a direct - * access like junctions, deduplicated files, NFS symlinks. 
- */ - if (reparse_file_needs_reval(fattr)) - fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; - /* non-unix readdir doesn't provide nlink */ fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK; @@ -269,9 +280,6 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info, fattr->cf_dtype = DT_REG; } - if (reparse_file_needs_reval(fattr)) - fattr->cf_flags |= CIFS_FATTR_NEED_REVAL; - sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER); sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP); } @@ -333,38 +341,6 @@ cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, cifs_fill_common_info(fattr, cifs_sb); } -/* BB eventually need to add the following helper function to - resolve NT_STATUS_STOPPED_ON_SYMLINK return code when - we try to do FindFirst on (NTFS) directory symlinks */ -/* -int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb, - unsigned int xid) -{ - __u16 fid; - int len; - int oplock = 0; - int rc; - struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb); - char *tmpbuffer; - - rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, - OPEN_REPARSE_POINT, &fid, &oplock, NULL, - cifs_sb->local_nls, - cifs_remap(cifs_sb); - if (!rc) { - tmpbuffer = kmalloc(maxpath); - rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path, - tmpbuffer, - maxpath -1, - fid, - cifs_sb->local_nls); - if (CIFSSMBClose(xid, ptcon, fid)) { - cifs_dbg(FYI, "Error closing temporary reparsepoint open\n"); - } - } -} - */ - static int _initiate_cifs_search(const unsigned int xid, struct file *file, const char *full_path) @@ -433,13 +409,10 @@ _initiate_cifs_search(const unsigned int xid, struct file *file, &cifsFile->fid, search_flags, &cifsFile->srch_inf); - if (rc == 0) + if (rc == 0) { cifsFile->invalidHandle = false; - /* BB add following call to handle readdir on new NTFS symlink errors - else if STATUS_STOPPED_ON_SYMLINK - call get_symlink_reparse_path and retry with new path */ - else if ((rc == -EOPNOTSUPP) && - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { + } else if ((rc == -EOPNOTSUPP) && + (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; goto ffirst_retry; } diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 9d34a55fdb5e4140e5fb62f6b63a50e8a5218b2a..fca55702b51adacc71b5e4c468a45e705cf08c4e 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -1536,6 +1536,11 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data) &sess_data->buf0_type, CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov); cifs_small_buf_release(sess_data->iov[0].iov_base); + if (rc == 0) + sess_data->ses->expired_pwd = false; + else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) + sess_data->ses->expired_pwd = true; + memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec)); return rc; diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index e8c03445271d087839aebbaf192779f4223bb948..0c97d3c860726a303081eb25927d38439bfaf4ea 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -4857,9 +4857,9 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp, file_info = (struct smb311_posix_qinfo *)rsp->Buffer; file_info->CreationTime = cpu_to_le64(fp->create_time); - time = ksmbd_UnixTimeToNT(inode->i_atime); + time = ksmbd_UnixTimeToNT(inode_get_atime(inode)); file_info->LastAccessTime = cpu_to_le64(time); - time = ksmbd_UnixTimeToNT(inode->i_mtime); + time = ksmbd_UnixTimeToNT(inode_get_mtime(inode)); file_info->LastWriteTime = 
cpu_to_le64(time); time = ksmbd_UnixTimeToNT(inode_get_ctime(inode)); file_info->ChangeTime = cpu_to_le64(time); @@ -5466,9 +5466,9 @@ int smb2_close(struct ksmbd_work *work) rsp->EndOfFile = cpu_to_le64(inode->i_size); rsp->Attributes = fp->f_ci->m_fattr; rsp->CreationTime = cpu_to_le64(fp->create_time); - time = ksmbd_UnixTimeToNT(inode->i_atime); + time = ksmbd_UnixTimeToNT(inode_get_atime(inode)); rsp->LastAccessTime = cpu_to_le64(time); - time = ksmbd_UnixTimeToNT(inode->i_mtime); + time = ksmbd_UnixTimeToNT(inode_get_mtime(inode)); rsp->LastWriteTime = cpu_to_le64(time); time = ksmbd_UnixTimeToNT(inode_get_ctime(inode)); rsp->ChangeTime = cpu_to_le64(time); diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index e5382f0b258782c68436a6b4f43ab036002a19d7..781206d0ec845c5a7d78e964ca9bdb5fdb9dc8aa 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -261,9 +261,6 @@ static int write_begin_slow(struct address_space *mapping, return err; } } - - SetPageUptodate(page); - ClearPageError(page); } if (PagePrivate(page)) @@ -462,9 +459,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, return err; } } - - SetPageUptodate(page); - ClearPageError(page); } err = allocate_budget(c, page, ui, appending); @@ -474,10 +468,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, * If we skipped reading the page because we were going to * write all of it, then it is not up to date. */ - if (skipped_read) { + if (skipped_read) ClearPageChecked(page); - ClearPageUptodate(page); - } /* * Budgeting failed which means it would have to force * write-back but didn't, because we set the @fast flag in the @@ -568,6 +560,9 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, goto out; } + if (len == PAGE_SIZE) + SetPageUptodate(page); + if (!PagePrivate(page)) { attach_page_private(page, (void *)1); atomic_long_inc(&c->dirty_pg_cnt); diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 59dd421a8e35d0b22c294f8c67c70c92ee06288e..e990c180282e78607f70e2a883c94a3f107af43b 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -157,6 +157,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; } static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ +DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock()) + #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); extern void thaw_secondary_cpus(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 71d186d6933a5e5f700335e4bd29eb47d59ee1f8..3a4cefb25ba61c8fd44a42ba05589be49e9adf43 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -1021,6 +1021,18 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, efficiencies); } +static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) +{ + unsigned int freq; + + if (idx < 0) + return false; + + freq = policy->freq_table[idx].frequency; + + return freq == clamp_val(freq, policy->min, policy->max); +} + static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) @@ -1054,7 +1066,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, return 0; } - if (idx < 0 && efficiencies) { + /* Limit frequency index to honor policy->min/max */ + if (!cpufreq_is_in_limits(policy, idx) && efficiencies) { efficiencies = false; goto retry; } diff --git 
a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index 33f21bd85dbf273eb98b4095e81351c57894af9d..f3196f82fd8a14364b09490a6e1ac46a2c55b0e9 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -178,6 +178,12 @@ struct rapl_package { struct rapl_if_priv *priv; }; +struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, + bool id_is_cpu); +struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, + bool id_is_cpu); +void rapl_remove_package_cpuslocked(struct rapl_package *rp); + struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu); struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu); void rapl_remove_package(struct rapl_package *rp); diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h index f422612c28d6b4da50bd690e3b142139f267912a..8ff8eabb4a987cf908f3cfcf05d2d3ad6185effa 100644 --- a/include/linux/intel_tcc.h +++ b/include/linux/intel_tcc.h @@ -13,6 +13,6 @@ int intel_tcc_get_tjmax(int cpu); int intel_tcc_get_offset(int cpu); int intel_tcc_set_offset(int cpu, int offset); -int intel_tcc_get_temp(int cpu, bool pkg); +int intel_tcc_get_temp(int cpu, int *temp, bool pkg); #endif /* __INTEL_TCC_H__ */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 279262057a92582f81514e9d08bcfd57c57a19a0..832b7e354b4e36bb7c3567f54a276b184c1a4117 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -612,6 +612,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); +void nfs_commit_begin(struct nfs_mds_commit_info *cinfo); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); static inline bool nfs_have_writebacks(const struct inode *inode) diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h index 70998e6dd6fdc9ea5665d8e01615709b042d6b69..6ca51e0080ec092eeadfda3d55f1ef551f1f7262 100644 --- a/include/linux/phy/tegra/xusb.h +++ b/include/linux/phy/tegra/xusb.h @@ -26,6 +26,7 @@ void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy); int tegra_phy_xusb_utmi_port_reset(struct phy *phy); int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl, unsigned int port); +int tegra_xusb_padctl_get_port_number(struct phy *phy); int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy, enum usb_device_speed speed); int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 27a26092493adb723bca7814721f3a3a048dd25c..a7d5fa892be267230865df69a431e1da971a661c 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -786,7 +786,8 @@ enum UART_TX_FLAGS { if (pending < WAKEUP_CHARS) { \ uart_write_wakeup(__port); \ \ - if (!((flags) & UART_TX_NOSTOP) && pending == 0) \ + if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \ + __port->ops->tx_empty(__port)) \ __port->ops->stop_tx(__port); \ } \ \ diff --git a/include/media/media-entity.h b/include/media/media-entity.h index 2b6cd343ee9e018c8ddf2e0b04c2bc62c26b01a6..4d95893c898460fcb931085c0a040e103e5d1cac 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -225,6 +225,7 @@ enum media_pad_signal_type { * @graph_obj: Embedded structure containing 
the media object common data * @entity: Entity this pad belongs to * @index: Pad index in the entity pads array, numbered from 0 to n + * @num_links: Number of links connected to this pad * @sig_type: Type of the signal inside a media pad * @flags: Pad flags, as defined in * :ref:`include/uapi/linux/media.h ` @@ -236,6 +237,7 @@ struct media_pad { struct media_gobj graph_obj; /* must be first field in struct */ struct media_entity *entity; u16 index; + u16 num_links; enum media_pad_signal_type sig_type; unsigned long flags; diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index f79ce133e51a7cf12e5a12e1dc598c35cb6cb542..519d23941b541ed1bd012a5ba62ae0ca72538570 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -378,6 +378,7 @@ struct ieee802154_llsec_key { struct ieee802154_llsec_key_entry { struct list_head list; + struct rcu_head rcu; struct ieee802154_llsec_key_id id; struct ieee802154_llsec_key *key; diff --git a/io_uring/net.c b/io_uring/net.c index 4aaeada03f1e7da124c23a6c98a139ea193586f9..5a4001139e288bfb3b4e89638d317487b8bd24c3 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -87,7 +87,7 @@ static inline bool io_check_multishot(struct io_kiocb *req, * generic paths but multipoll may decide to post extra cqes. */ return !(issue_flags & IO_URING_F_IOWQ) || - !(issue_flags & IO_URING_F_MULTISHOT) || + !(req->flags & REQ_F_APOLL_MULTISHOT) || !req->ctx->task_complete; } @@ -915,7 +915,8 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) kfree(kmsg->free_iov); io_netmsg_recycle(req, issue_flags); req->flags &= ~REQ_F_NEED_CLEANUP; - } + } else if (ret == -EAGAIN) + return io_setup_async_msg(req, kmsg, issue_flags); return ret; } diff --git a/kernel/bounds.c b/kernel/bounds.c index b529182e8b04fc5e53f106b5b7d0fdeea6c877aa..c5a9fcd2d622819cc1f5b0f5cfd772514ee5f423 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -19,7 +19,7 @@ int main(void) DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS); DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES); #ifdef CONFIG_SMP - DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS)); + DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS)); #endif DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t)); #ifdef CONFIG_LRU_GEN diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index fa3bf161d13f79a6d28dc891d3b5708871e4a969..a718067deeceee3f7aff6df9317adcac104df3ee 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -192,6 +192,7 @@ static int __init mem_sleep_default_setup(char *str) if (mem_sleep_labels[state] && !strcmp(str, mem_sleep_labels[state])) { mem_sleep_default = state; + mem_sleep_current = state; break; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b67fbae1299fa8720ee17cec3f4301837b044429..c358c04d051622f3ba417ed0af57fe3fead77abc 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3284,6 +3284,21 @@ static int __init keep_bootcon_setup(char *str) early_param("keep_bootcon", keep_bootcon_setup); +static int console_call_setup(struct console *newcon, char *options) +{ + int err; + + if (!newcon->setup) + return 0; + + /* Synchronize with possible boot console. 
*/ + console_lock(); + err = newcon->setup(newcon, options); + console_unlock(); + + return err; +} + /* * This is called by register_console() to try to match * the newly registered console with any of the ones selected @@ -3319,8 +3334,8 @@ static int try_enable_preferred_console(struct console *newcon, if (_braille_register_console(newcon, c)) return 0; - if (newcon->setup && - (err = newcon->setup(newcon, c->options)) != 0) + err = console_call_setup(newcon, c->options); + if (err) return err; } newcon->flags |= CON_ENABLED; @@ -3346,7 +3361,7 @@ static void try_enable_default_console(struct console *newcon) if (newcon->index < 0) newcon->index = 0; - if (newcon->setup && newcon->setup(newcon, NULL) != 0) + if (console_call_setup(newcon, NULL) != 0) return; newcon->flags |= CON_ENABLED; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b0e7289bc9c2d94bb45f1517df3d1925d69a4a7a..6be54597da7fc51d0df4336a72bf2605f36a54fe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11608,11 +11608,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). */ - cpus_read_lock(); - mutex_lock(&cfs_constraints_mutex); + guard(cpus_read_lock)(); + guard(mutex)(&cfs_constraints_mutex); + ret = __cfs_schedulable(tg, period, quota); if (ret) - goto out_unlock; + return ret; runtime_enabled = quota != RUNTIME_INF; runtime_was_enabled = cfs_b->quota != RUNTIME_INF; @@ -11622,39 +11623,38 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, */ if (runtime_enabled && !runtime_was_enabled) cfs_bandwidth_usage_inc(); - raw_spin_lock_irq(&cfs_b->lock); - cfs_b->period = ns_to_ktime(period); - cfs_b->quota = quota; - cfs_b->burst = burst; - __refill_cfs_bandwidth_runtime(cfs_b); + scoped_guard (raw_spinlock_irq, &cfs_b->lock) { + cfs_b->period = ns_to_ktime(period); + cfs_b->quota = quota; + cfs_b->burst = burst; - /* Restart the period timer (if active) to handle new period expiry: */ - if (runtime_enabled) - start_cfs_bandwidth(cfs_b); + __refill_cfs_bandwidth_runtime(cfs_b); - raw_spin_unlock_irq(&cfs_b->lock); + /* + * Restart the period timer (if active) to handle new + * period expiry: + */ + if (runtime_enabled) + start_cfs_bandwidth(cfs_b); + } for_each_online_cpu(i) { struct cfs_rq *cfs_rq = tg->cfs_rq[i]; struct rq *rq = cfs_rq->rq; - struct rq_flags rf; - rq_lock_irq(rq, &rf); + guard(rq_lock_irq)(rq); cfs_rq->runtime_enabled = runtime_enabled; cfs_rq->runtime_remaining = 0; if (cfs_rq->throttled) unthrottle_cfs_rq(cfs_rq); - rq_unlock_irq(rq, &rf); } + if (runtime_was_enabled && !runtime_enabled) cfs_bandwidth_usage_dec(); -out_unlock: - mutex_unlock(&cfs_constraints_mutex); - cpus_read_unlock(); - return ret; + return 0; } static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c index ce39ce9f3526ea2eee7891b62302369b076e8974..2829ddb0e316b4d300bdae37d0c82e4f2882bd35 100644 --- a/lib/pci_iomap.c +++ b/lib/pci_iomap.c @@ -170,8 +170,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *p) if (addr >= start && addr < start + IO_SPACE_LIMIT) return; - iounmap(p); #endif + iounmap(p); } EXPORT_SYMBOL(pci_iounmap); diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index b61cc6a42541ad2f87efeddfb90a5e6d0122b580..0119075d2e58e8f9de7073ed325b048927b16ac6 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -450,7 +450,8 @@ static void kmalloc_oob_16(struct 
kunit *test) /* This test is specifically crafted for the generic mode. */ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); - ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); + /* RELOC_HIDE to prevent gcc from warning about short alloc */ + ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); diff --git a/mm/swapfile.c b/mm/swapfile.c index aa767f925d4d0d73c6a59f0e64bcc24c99197089..4ca5ad21d335b52c7e68a749918c707d01795c8b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1227,6 +1227,11 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, * with get_swap_device() and put_swap_device(), unless the swap * functions call get/put_swap_device() by themselves. * + * Note that when only holding the PTL, swapoff might succeed immediately + * after freeing a swap entry. Therefore, immediately after + * __swap_entry_free(), the swap info might become stale and should not + * be touched without a prior get_swap_device(). + * * Check whether swap entry is valid in the swap device. If so, * return pointer to swap_info_struct, and keep the swap entry valid * via preventing the swap device from being swapoff, until @@ -1604,13 +1609,19 @@ int free_swap_and_cache(swp_entry_t entry) if (non_swap_entry(entry)) return 1; - p = _swap_info_get(entry); + p = get_swap_device(entry); if (p) { + if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { + put_swap_device(p); + return 0; + } + count = __swap_entry_free(p, entry); if (count == SWAP_HAS_CACHE && !swap_page_trans_huge_swapped(p, entry)) __try_to_reclaim_swap(p, swp_offset(entry), TTRS_UNMAPPED | TTRS_FULL); + put_swap_device(p); } return p != NULL; } diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 8d2eabc71bbeb04358dc04bb40e3ac03188e6380..f13b07ebfb98a66b2dc48896ea35f6195b164157 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -265,19 +265,27 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec, return -ENOMEM; } +static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu) +{ + struct ieee802154_llsec_key_entry *pos; + struct mac802154_llsec_key *mkey; + + pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu); + mkey = container_of(pos->key, struct mac802154_llsec_key, key); + + llsec_key_put(mkey); + kfree_sensitive(pos); +} + int mac802154_llsec_key_del(struct mac802154_llsec *sec, const struct ieee802154_llsec_key_id *key) { struct ieee802154_llsec_key_entry *pos; list_for_each_entry(pos, &sec->table.keys, list) { - struct mac802154_llsec_key *mkey; - - mkey = container_of(pos->key, struct mac802154_llsec_key, key); - if (llsec_key_id_equal(&pos->id, key)) { list_del_rcu(&pos->list); - llsec_key_put(mkey); + call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu); return 0; } } diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 2fe6f2828d3769a88da2a8ffcae91a64442fc9fb..16c750bb95fafdc7a1b555260c34745d934ecb9a 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -143,6 +143,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict) +KBUILD_CFLAGS += -Wno-enum-compare-conditional +KBUILD_CFLAGS += -Wno-enum-enum-conversion endif endif diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index 
245cc650a4dc99c6e750dc49c84516eb44e0b198..336bedaa3af6890fe6031528f7a4db7f65eb7968 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -32,6 +32,18 @@ #include "ruleset.h" #include "setup.h" +static bool is_initialized(void) +{ + if (likely(landlock_initialized)) + return true; + + pr_warn_once( + "Disabled but requested by user space. " + "You should enable Landlock at boot time: " + "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n"); + return false; +} + /** * copy_min_struct_from_user - Safe future-proof argument copying * @@ -165,7 +177,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset, /* Build-time checks. */ build_check_abi(); - if (!landlock_initialized) + if (!is_initialized()) return -EOPNOTSUPP; if (flags) { @@ -311,7 +323,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, struct landlock_ruleset *ruleset; int res, err; - if (!landlock_initialized) + if (!is_initialized()) return -EOPNOTSUPP; /* No flag for now. */ @@ -402,7 +414,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, struct landlock_cred_security *new_llcred; int err; - if (!landlock_initialized) + if (!is_initialized()) return -EOPNOTSUPP; /* diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 1f1ea8529421fd949c0c85f4bdf0e5f0d4d26b21..e1e297deb02e6aa077703c71e973ad318b5372f2 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -1312,7 +1312,8 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap, check_star = 1; } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { check_priv = 1; - if (size != TRANS_TRUE_SIZE || + if (!S_ISDIR(d_backing_inode(dentry)->i_mode) || + size != TRANS_TRUE_SIZE || strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) rc = -EINVAL; } else @@ -2853,6 +2854,15 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name, if (value == NULL || size > SMK_LONGLABEL || size == 0) return -EINVAL; + if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) { + if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE || + strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) + return -EINVAL; + + nsp->smk_flags |= SMK_INODE_TRANSMUTE; + return 0; + } + skp = smk_import_entry(value, size); if (IS_ERR(skp)) return PTR_ERR(skp); diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting new file mode 100644 index 0000000000000000000000000000000000000000..a953c96aa16e1e814867b33d06e894ffb664bb1b --- /dev/null +++ b/tools/testing/selftests/mqueue/setting @@ -0,0 +1 @@ +timeout=180 diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index e033c79d528e0040e88fdd02f6eec3c6d6c00213..28658b9e0d96841eb3799efc88af1d1e9ccff9f3 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -87,7 +87,27 @@ static void async_pf_execute(struct work_struct *work) __kvm_vcpu_wake_up(vcpu); mmput(mm); - kvm_put_kvm(vcpu->kvm); +} + +static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work) +{ + /* + * The async #PF is "done", but KVM must wait for the work item itself, + * i.e. async_pf_execute(), to run to completion. If KVM is a module, + * KVM must ensure *no* code owned by the KVM (the module) can be run + * after the last call to module_put(). Note, flushing the work item + * is always required when the item is taken off the completion queue. + * E.g. even if the vCPU handles the item in the "normal" path, the VM + * could be terminated before async_pf_execute() completes. 
+ * + * Wake-all events skip the queue and go straight to done, i.e. they + * don't need to be flushed (but sanity check that the work wasn't queued). + */ + if (work->wakeup_all) + WARN_ON_ONCE(work->work.func); + else + flush_work(&work->work); + kmem_cache_free(async_pf_cache, work); } void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) @@ -114,7 +134,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) #else if (cancel_work_sync(&work->work)) { mmput(work->mm); - kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ kmem_cache_free(async_pf_cache, work); } #endif @@ -126,7 +145,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) list_first_entry(&vcpu->async_pf.done, typeof(*work), link); list_del(&work->link); - kmem_cache_free(async_pf_cache, work); + + spin_unlock(&vcpu->async_pf.lock); + kvm_flush_and_free_async_pf_work(work); + spin_lock(&vcpu->async_pf.lock); } spin_unlock(&vcpu->async_pf.lock); @@ -151,7 +173,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu) list_del(&work->queue); vcpu->async_pf.queued--; - kmem_cache_free(async_pf_cache, work); + kvm_flush_and_free_async_pf_work(work); } } @@ -186,7 +208,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, work->arch = *arch; work->mm = current->mm; mmget(work->mm); - kvm_get_kvm(work->vcpu->kvm); INIT_WORK(&work->work, async_pf_execute);
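
For context on the vfio_pci/vfio_platform trigger rework earlier in this series: the kernel-side handlers there signal a user-supplied eventfd that userspace registers through VFIO_DEVICE_SET_IRQS. The sketch below is an editor-added, illustrative userspace counterpart, not part of any patch; it assumes a VFIO device fd has already been obtained and abbreviates error handling.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Register an eventfd as the trigger for one interrupt index. */
static int set_irq_trigger_eventfd(int device_fd, uint32_t index)
{
	size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
	struct vfio_irq_set *set;
	int32_t efd;

	/* The fd the kernel signals from its IRQ handler. */
	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return -1;

	set = calloc(1, argsz);
	if (!set) {
		close(efd);
		return -1;
	}

	set->argsz = argsz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = index;	/* e.g. VFIO_PCI_INTX_IRQ_INDEX for INTx */
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));

	/* Kernel side: vfio_pci_set_intx_trigger() / vfio_set_trigger(). */
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
		free(set);
		close(efd);
		return -1;
	}

	free(set);
	return efd;	/* read() on this fd blocks until the device interrupts */
}

Passing fd -1 (or count 0 with VFIO_IRQ_SET_DATA_NONE) through the same ioctl tears the trigger down again, which is the path handled by vfio_set_trigger(vdev, index, -1) in the vfio_platform hunk above.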