diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 7e54b6e91071e267e47433dcfdda718d9f22e1f0..2f6ce5dbb77ded6a1929263e13eff6d672a8333f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2226,6 +2226,8 @@ no_x2apic_optout BIOS x2APIC opt-out request will be ignored nopost disable Interrupt Posting + posted_msi + enable MSIs delivered as posted interrupts iomem= Disable strict checking of access to MMIO memory strict regions from userspace. @@ -2333,11 +2335,6 @@ isapnp= [ISAPNP] Format: ,,, - zhaoxin_patch_bitmask= - [X86] Bitmask for Zhaoxin Platform's patch. - bit 0: enable KH-40000 dma patch's node check function - - isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance. [Deprecated - use cpusets instead] Format: [flag-list,] diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml index d25423aa71674c40c5718fa33cd01de27f2441b6..94d4a4914a48392a17fd8ca97171d3df6e86b106 100644 --- a/Documentation/devicetree/bindings/pci/host-generic-pci.yaml +++ b/Documentation/devicetree/bindings/pci/host-generic-pci.yaml @@ -110,6 +110,12 @@ properties: iommu-map-mask: true msi-parent: true + ats-supported: + description: + Indicates that a PCIe host controller supports ATS, and can handle Memory + Requests with Address Type (AT). + type: boolean + required: - compatible - reg diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 197707bc7658898843d78bee7c0f02a10f7d26b9..6b85e94f3275995bc2a6f8e8624f08198cf665f9 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, /* * Plug in direct dma map ops. 
*/ -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { /* * IOC hardware snoops all DMA traffic keeping the caches consistent diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index b94850b579952aefacbd1710bc3c317b4c4b77c9..97db5397c320bd93a8a9148fdfd6f9255a5abfba 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, } } -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { if (IS_ENABLED(CONFIG_CPU_V7M)) { /* diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6c359a3af8d9c73e35686efe538f1f2754d58a07..5ac482d7ff94c430c9445ba24c07dc09b1191277 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1712,11 +1712,15 @@ void arm_iommu_detach_device(struct device *dev) } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); -static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +static void arm_setup_iommu_dma_ops(struct device *dev) { struct dma_iommu_mapping *mapping; + u64 dma_base = 0, size = 1ULL << 32; + if (dev->dma_range_map) { + dma_base = dma_range_map_min(dev->dma_range_map); + size = dma_range_map_max(dev->dma_range_map) - dma_base; + } mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); if (IS_ERR(mapping)) { pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", @@ -1747,8 +1751,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) #else -static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +static void arm_setup_iommu_dma_ops(struct device *dev) { } @@ -1756,8 +1759,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { } #endif /* CONFIG_ARM_DMA_USE_IOMMU */ -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { /* * Due to legacy code that sets the ->dma_coherent flag from a bus @@ -1777,7 +1779,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, return; if (device_iommu_mapped(dev)) - arm_setup_iommu_dma_ops(dev, dma_base, size, coherent); + arm_setup_iommu_dma_ops(dev); xen_setup_dma_ops(dev); dev->archdata.dma_ops_setup = true; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b8d7b905b8a2d39c75459e862d9cfde35570fc69..2e2ff6f4b44bac80fd11893bc3b7610b3c9815bc 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -49,7 +49,6 @@ config ARM64 select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYSCALL_WRAPPER - select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_ELF_PROT diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts index 60472d65a35574033dbcc0eba024d4231a738629..85f1c15cc65d06187a74d19570c6fbd0ac32e5c8 100644 --- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts +++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts @@ -243,6 +243,7 @@ pci: pci@40000000 { iommu-map = <0x0 &smmu 0x0 0x10000>; dma-coherent; + ats-supported; }; smmu: iommu@2b400000 { diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 
61886e43e3a10fe8c84ce6febffc5d65703a2cce..b2b5792b2caaf81ccfc3204c94395bb0faeabddd 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -39,15 +38,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) dcache_clean_poc(start, start + size); } -#ifdef CONFIG_IOMMU_DMA -void arch_teardown_dma_ops(struct device *dev) -{ - dev->dma_ops = NULL; -} -#endif - -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { int cls = cache_line_size_of_cpu(); @@ -58,8 +49,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, ARCH_DMA_MINALIGN, cls); dev->dma_coherent = coherent; - if (device_iommu_mapped(dev)) - iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1); xen_setup_dma_ops(dev); } diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c index cc0ccde58db87f8ddd6d5e568f40f864bd55b0c1..3b6c16dc90b3e0c1572d1162c0621068451de9ed 100644 --- a/arch/loongarch/kernel/dma.c +++ b/arch/loongarch/kernel/dma.c @@ -30,7 +30,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) void acpi_arch_dma_setup(struct device *dev) { int ret; - u64 mask, end = 0; + u64 mask, end; const struct bus_dma_region *map = NULL; if (node_id_offset == 0) { @@ -40,12 +40,7 @@ void acpi_arch_dma_setup(struct device *dev) ret = acpi_dma_get_range(dev, &map); if (!ret && map) { - const struct bus_dma_region *r = map; - - for (end = 0; r->size; r++) { - if (r->dma_start + r->size - 1 > end) - end = r->dma_start + r->size - 1; - } + end = dma_range_map_max(map); mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 0f3cec663a12cd51498157c390f974213cb5a658..ab4f2a75a7d0116c31628b4c18c7e5f9d754a28e 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, #endif #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { dev->dma_coherent = coherent; } diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 0266959435505a6024bd96fe29e9cd3a302d928a..6ce13ef3e37dddd7f854cb4138052b1a93102e4e 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -156,6 +156,9 @@ extern int iommu_tce_table_put(struct iommu_table *tbl); extern struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, unsigned long res_start, unsigned long res_end); bool iommu_table_in_use(struct iommu_table *tbl); +extern void iommu_table_reserve_pages(struct iommu_table *tbl, + unsigned long res_start, unsigned long res_end); +extern void iommu_table_clear(struct iommu_table *tbl); #define IOMMU_TABLE_GROUP_MAX_TABLES 2 @@ -178,9 +181,9 @@ struct iommu_table_group_ops { long (*unset_window)(struct iommu_table_group *table_group, int num); /* Switch ownership from platform code to external user (e.g. VFIO) */ - long (*take_ownership)(struct iommu_table_group *table_group); + long (*take_ownership)(struct iommu_table_group *table_group, struct device *dev); /* Switch ownership from external user (e.g. 
VFIO) back to core */ - void (*release_ownership)(struct iommu_table_group *table_group); + void (*release_ownership)(struct iommu_table_group *table_group, struct device *dev); }; struct iommu_table_group_link { @@ -217,8 +220,8 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, enum dma_data_direction *direction); extern void iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages); +int dev_has_iommu_table(struct device *dev, void *data); -extern struct iommu_table_group_ops spapr_tce_table_group_ops; #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, @@ -231,6 +234,11 @@ static inline int iommu_add_device(struct iommu_table_group *table_group, { return 0; } + +static inline int dev_has_iommu_table(struct device *dev, void *data) +{ + return 0; +} #endif /* !CONFIG_IOMMU_API */ u64 dma_iommu_get_required_mask(struct device *dev); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 82626363a3090cdbedf69a17cb7c9c4782a456ec..c0cef454756058c9fccd0e40a6d7eb493ce33e65 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -1265,22 +1265,6 @@ EXPORT_SYMBOL(eeh_dev_release); #ifdef CONFIG_IOMMU_API -static int dev_has_iommu_table(struct device *dev, void *data) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct pci_dev **ppdev = data; - - if (!dev) - return 0; - - if (device_iommu_mapped(dev)) { - *ppdev = pdev; - return 1; - } - - return 0; -} - /** * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE * @group: IOMMU group diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 1185efebf032b6e7d2cf08db4c953938948a44b1..5c953c0211b3535b3de22d9b0ccf7c203815c359 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -642,7 +642,7 @@ void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, tbl->it_ops->flush(tbl); } -static void iommu_table_clear(struct iommu_table *tbl) +void iommu_table_clear(struct iommu_table *tbl) { /* * In case of firmware assisted dump system goes through clean @@ -683,7 +683,7 @@ static void iommu_table_clear(struct iommu_table *tbl) #endif } -static void iommu_table_reserve_pages(struct iommu_table *tbl, +void iommu_table_reserve_pages(struct iommu_table *tbl, unsigned long res_start, unsigned long res_end) { int i; @@ -987,6 +987,23 @@ unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir) EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm); #ifdef CONFIG_IOMMU_API + +int dev_has_iommu_table(struct device *dev, void *data) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_dev **ppdev = data; + + if (!dev) + return 0; + + if (device_iommu_mapped(dev)) { + *ppdev = pdev; + return 1; + } + + return 0; +} + /* * SPAPR TCE API */ @@ -1101,59 +1118,6 @@ void iommu_tce_kill(struct iommu_table *tbl, } EXPORT_SYMBOL_GPL(iommu_tce_kill); -#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) -static int iommu_take_ownership(struct iommu_table *tbl) -{ - unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; - int ret = 0; - - /* - * VFIO does not control TCE entries allocation and the guest - * can write new TCEs on top of existing ones so iommu_tce_build() - * must be able to release old pages. This functionality - * requires exchange() callback defined so if it is not - * implemented, we disallow taking ownership over the table. 
- */ - if (!tbl->it_ops->xchg_no_kill) - return -EINVAL; - - spin_lock_irqsave(&tbl->large_pool.lock, flags); - for (i = 0; i < tbl->nr_pools; i++) - spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); - - if (iommu_table_in_use(tbl)) { - pr_err("iommu_tce: it_map is not empty"); - ret = -EBUSY; - } else { - memset(tbl->it_map, 0xff, sz); - } - - for (i = 0; i < tbl->nr_pools; i++) - spin_unlock(&tbl->pools[i].lock); - spin_unlock_irqrestore(&tbl->large_pool.lock, flags); - - return ret; -} - -static void iommu_release_ownership(struct iommu_table *tbl) -{ - unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; - - spin_lock_irqsave(&tbl->large_pool.lock, flags); - for (i = 0; i < tbl->nr_pools; i++) - spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); - - memset(tbl->it_map, 0, sz); - - iommu_table_reserve_pages(tbl, tbl->it_reserved_start, - tbl->it_reserved_end); - - for (i = 0; i < tbl->nr_pools; i++) - spin_unlock(&tbl->pools[i].lock); - spin_unlock_irqrestore(&tbl->large_pool.lock, flags); -} -#endif - int iommu_add_device(struct iommu_table_group *table_group, struct device *dev) { /* @@ -1185,98 +1149,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev) EXPORT_SYMBOL_GPL(iommu_add_device); #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) -/* - * A simple iommu_table_group_ops which only allows reusing the existing - * iommu_table. This handles VFIO for POWER7 or the nested KVM. - * The ops does not allow creating windows and only allows reusing the existing - * one if it matches table_group->tce32_start/tce32_size/page_shift. - */ -static unsigned long spapr_tce_get_table_size(__u32 page_shift, - __u64 window_size, __u32 levels) -{ - unsigned long size; - - if (levels > 1) - return ~0U; - size = window_size >> (page_shift - 3); - return size; -} - -static long spapr_tce_create_table(struct iommu_table_group *table_group, int num, - __u32 page_shift, __u64 window_size, __u32 levels, - struct iommu_table **ptbl) -{ - struct iommu_table *tbl = table_group->tables[0]; - - if (num > 0) - return -EPERM; - - if (tbl->it_page_shift != page_shift || - tbl->it_size != (window_size >> page_shift) || - tbl->it_indirect_levels != levels - 1) - return -EINVAL; - - *ptbl = iommu_tce_table_get(tbl); - return 0; -} - -static long spapr_tce_set_window(struct iommu_table_group *table_group, - int num, struct iommu_table *tbl) -{ - return tbl == table_group->tables[num] ? 
0 : -EPERM; -} - -static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num) -{ - return 0; -} - -static long spapr_tce_take_ownership(struct iommu_table_group *table_group) -{ - int i, j, rc = 0; - - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { - struct iommu_table *tbl = table_group->tables[i]; - - if (!tbl || !tbl->it_map) - continue; - - rc = iommu_take_ownership(tbl); - if (!rc) - continue; - - for (j = 0; j < i; ++j) - iommu_release_ownership(table_group->tables[j]); - return rc; - } - return 0; -} - -static void spapr_tce_release_ownership(struct iommu_table_group *table_group) -{ - int i; - - for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { - struct iommu_table *tbl = table_group->tables[i]; - - if (!tbl) - continue; - - iommu_table_clear(tbl); - if (tbl->it_map) - iommu_release_ownership(tbl); - } -} - -struct iommu_table_group_ops spapr_tce_table_group_ops = { - .get_table_size = spapr_tce_get_table_size, - .create_table = spapr_tce_create_table, - .set_window = spapr_tce_set_window, - .unset_window = spapr_tce_unset_window, - .take_ownership = spapr_tce_take_ownership, - .release_ownership = spapr_tce_release_ownership, -}; - /* * A simple iommu_ops to allow less cruft in generic VFIO code. */ @@ -1299,7 +1171,7 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain, * The domain being set to PLATFORM from earlier * BLOCKED. The table_group ownership has to be released. */ - table_group->ops->release_ownership(table_group); + table_group->ops->release_ownership(table_group, dev); iommu_group_put(grp); return 0; @@ -1327,7 +1199,7 @@ spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain, * also sets the dma_api ops */ table_group = iommu_group_get_iommudata(grp); - ret = table_group->ops->take_ownership(table_group); + ret = table_group->ops->take_ownership(table_group, dev); iommu_group_put(grp); return ret; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 1202a69b0a2088a8ecee3a14c091c362fbf6cfb5..4cd9c0de22c2bd18e6ad7e95344686082d7b869b 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -424,23 +424,6 @@ static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu, cell_iommu_enable_hardware(iommu); } -#if 0/* Unused for now */ -static struct iommu_window *find_window(struct cbe_iommu *iommu, - unsigned long offset, unsigned long size) -{ - struct iommu_window *window; - - /* todo: check for overlapping (but not equal) windows) */ - - list_for_each_entry(window, &(iommu->windows), list) { - if (window->offset == offset && window->size == size) - return window; - } - - return NULL; -} -#endif - static inline u32 cell_iommu_get_ioid(struct device_node *np) { const u32 *ioid; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 28fac4770073166557610248c081d0a4d1775084..d557bce987ca08bbe01a0418c2091d989f8c1baa 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1537,7 +1537,8 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) } } -static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) +static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); @@ -1562,7 +1563,8 @@ static long pnv_ioda2_take_ownership(struct 
iommu_table_group *table_group) return 0; } -static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) +static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group, + struct device *dev __maybe_unused) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index bf02f94a973dbde0aee5d627ad4a93ee7c8c2e39..23c800c88a481f2c54a07606db4e8992a0ec2493 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -67,6 +68,10 @@ static struct iommu_table *iommu_pseries_alloc_table(int node) return tbl; } +#ifdef CONFIG_IOMMU_API +static struct iommu_table_group_ops spapr_tce_table_group_ops; +#endif + static struct iommu_table_group *iommu_pseries_alloc_group(int node) { struct iommu_table_group *table_group; @@ -102,7 +107,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, #endif /* Default DMA window table is at index 0, while DDW at 1. SR-IOV - * adapters only have table on index 1. + * adapters only have table on index 0(if not direct mapped). */ if (table_group->tables[0]) iommu_tce_table_put(table_group->tables[0]); @@ -143,7 +148,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, } -static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) +static void tce_clear_pSeries(struct iommu_table *tbl, long index, long npages) { __be64 *tcep; @@ -162,6 +167,39 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) return be64_to_cpu(*tcep); } +#ifdef CONFIG_IOMMU_API +static long pseries_tce_iommu_userspace_view_alloc(struct iommu_table *tbl) +{ + unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) * tbl->it_size, PAGE_SIZE); + unsigned long *uas; + + if (tbl->it_indirect_levels) /* Impossible */ + return -EPERM; + + WARN_ON(tbl->it_userspace); + + uas = vzalloc(cb); + if (!uas) + return -ENOMEM; + + tbl->it_userspace = (__be64 *) uas; + + return 0; +} +#endif + +static void tce_iommu_userspace_view_free(struct iommu_table *tbl) +{ + vfree(tbl->it_userspace); + tbl->it_userspace = NULL; +} + +static void tce_free_pSeries(struct iommu_table *tbl) +{ + if (!tbl->it_userspace) + tce_iommu_userspace_view_free(tbl); +} + static void tce_free_pSeriesLP(unsigned long liobn, long, long, long); static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); @@ -576,7 +614,7 @@ struct iommu_table_ops iommu_table_lpar_multi_ops; struct iommu_table_ops iommu_table_pseries_ops = { .set = tce_build_pSeries, - .clear = tce_free_pSeries, + .clear = tce_clear_pSeries, .get = tce_get_pseries }; @@ -685,17 +723,47 @@ static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned return rc; } + +static __be64 *tce_useraddr_pSeriesLP(struct iommu_table *tbl, long index, + bool __always_unused alloc) +{ + return tbl->it_userspace ? 
&tbl->it_userspace[index - tbl->it_offset] : NULL; +} #endif struct iommu_table_ops iommu_table_lpar_multi_ops = { .set = tce_buildmulti_pSeriesLP, #ifdef CONFIG_IOMMU_API .xchg_no_kill = tce_exchange_pseries, + .useraddrptr = tce_useraddr_pSeriesLP, #endif .clear = tce_freemulti_pSeriesLP, - .get = tce_get_pSeriesLP + .get = tce_get_pSeriesLP, + .free = tce_free_pSeries }; +#ifdef CONFIG_IOMMU_API +/* + * When the DMA window properties might have been removed, + * the parent node has the table_group setup on it. + */ +static struct device_node *pci_dma_find_parent_node(struct pci_dev *dev, + struct iommu_table_group *table_group) +{ + struct device_node *dn = pci_device_to_OF_node(dev); + struct pci_dn *rpdn; + + for (; dn && PCI_DN(dn); dn = dn->parent) { + rpdn = PCI_DN(dn); + + if (table_group == rpdn->table_group) + return dn; + } + + return NULL; +} +#endif + /* * Find nearest ibm,dma-window (default DMA window) or direct DMA window or * dynamic 64bit DMA window, walking up the device tree. @@ -812,13 +880,6 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) be32_to_cpu(prop.tce_shift), NULL, &iommu_table_lpar_multi_ops); - /* Only for normal boot with default window. Doesn't matter even - * if we set these with DDW which is 64bit during kdump, since - * these will not be used during kdump. - */ - ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base); - ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); - if (!iommu_init_table(tbl, ppci->phb->node, 0, 0)) panic("Failed to initialize iommu table"); @@ -917,7 +978,7 @@ static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liob } static void remove_dma_window(struct device_node *np, u32 *ddw_avail, - struct property *win) + struct property *win, bool cleanup) { struct dynamic_dma_window_prop *dwp; u64 liobn; @@ -925,11 +986,44 @@ static void remove_dma_window(struct device_node *np, u32 *ddw_avail, dwp = win->value; liobn = (u64)be32_to_cpu(dwp->liobn); - clean_dma_window(np, dwp); + if (cleanup) + clean_dma_window(np, dwp); __remove_dma_window(np, ddw_avail, liobn); } -static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name) +static void copy_property(struct device_node *pdn, const char *from, const char *to) +{ + struct property *src, *dst; + + src = of_find_property(pdn, from, NULL); + if (!src) + return; + + dst = kzalloc(sizeof(*dst), GFP_KERNEL); + if (!dst) + return; + + dst->name = kstrdup(to, GFP_KERNEL); + dst->value = kmemdup(src->value, src->length, GFP_KERNEL); + dst->length = src->length; + if (!dst->name || !dst->value) + return; + + if (of_add_property(pdn, dst)) { + pr_err("Unable to add DMA window property for %pOF", pdn); + goto free_prop; + } + + return; + +free_prop: + kfree(dst->name); + kfree(dst->value); + kfree(dst); +} + +static int remove_dma_window_named(struct device_node *np, bool remove_prop, const char *win_name, + bool cleanup) { struct property *win; u32 ddw_avail[DDW_APPLICABLE_SIZE]; @@ -944,13 +1038,20 @@ static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_ if (ret) return 0; - if (win->length >= sizeof(struct dynamic_dma_window_prop)) - remove_dma_window(np, ddw_avail, win); + remove_dma_window(np, ddw_avail, win, cleanup); if (!remove_prop) return 0; + /* Default window property if removed is lost as reset-pe doesn't restore it. + * Though FDT has a copy of it, the DLPAR hotplugged devices will not have a + * node on FDT until next reboot. So, back it up. 
+ */ + if ((strcmp(win_name, "ibm,dma-window") == 0) && + !of_find_property(np, "ibm,dma-window-saved", NULL)) + copy_property(np, win_name, "ibm,dma-window-saved"); + ret = of_remove_property(np, win); if (ret) pr_warn("%pOF: failed to remove DMA window property: %d\n", @@ -1008,7 +1109,7 @@ static void find_existing_ddw_windows_named(const char *name) for_each_node_with_property(pdn, name) { dma64 = of_get_property(pdn, name, &len); if (!dma64 || len < sizeof(*dma64)) { - remove_ddw(pdn, true, name); + remove_dma_window_named(pdn, true, name, true); continue; } @@ -1381,7 +1482,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (reset_win_ext) goto out_failed; - remove_dma_window(pdn, ddw_avail, default_win); + remove_dma_window(pdn, ddw_avail, default_win, true); default_win_removed = true; /* Query again, to check if the window is available */ @@ -1477,6 +1578,11 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) clean_dma_window(pdn, win64->value); goto out_del_list; } + if (default_win_removed) { + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + set_iommu_table_base(&dev->dev, NULL); + } } else { struct iommu_table *newtbl; int i; @@ -1506,7 +1612,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); iommu_init_table(newtbl, pci->phb->node, start, end); - pci->table_group->tables[1] = newtbl; + pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl; set_iommu_table_base(&dev->dev, newtbl); } @@ -1514,7 +1620,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (default_win_removed) { iommu_tce_table_put(pci->table_group->tables[0]); pci->table_group->tables[0] = NULL; - + if (!of_find_property(pdn, "ibm,dma-window-saved", NULL)) + copy_property(pdn, "ibm,dma-window", "ibm,dma-window-saved"); /* default_win is valid here because default_win_removed == true */ of_remove_property(pdn, default_win); dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn); @@ -1566,6 +1673,71 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) return direct_mapping; } +static __u64 query_page_size_to_mask(u32 query_page_size) +{ + const long shift[] = { + (SZ_4K), (SZ_64K), (SZ_16M), + (SZ_32M), (SZ_64M), (SZ_128M), + (SZ_256M), (SZ_16G), (SZ_2M) + }; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(shift); i++) { + if (query_page_size & (1 << i)) + ret |= shift[i]; + } + + return ret; +} + +static void spapr_tce_init_table_group(struct pci_dev *pdev, + struct device_node *pdn, + struct dynamic_dma_window_prop prop) +{ + struct iommu_table_group *table_group = PCI_DN(pdn)->table_group; + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + + struct ddw_query_response query; + int ret; + + /* Only for normal boot with default window. Doesn't matter during + * kdump, since these will not be used during kdump. 
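
For reference, a minimal user-space sketch of what query_page_size_to_mask() above computes: the page-size bitmask returned by the DDW query call is folded into a mask of byte sizes that is then or-ed into table_group->pgsizes. Only the first few bits are modelled here; page_size_to_mask(), sizes[] and the sample input are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define SZ_4K	0x1000ULL
#define SZ_64K	0x10000ULL
#define SZ_16M	0x1000000ULL

/* Mirrors the bit layout of the shift[] table in the hunk above
 * (bit 0 = 4K, bit 1 = 64K, bit 2 = 16M, ...). */
static uint64_t page_size_to_mask(uint32_t query_page_size)
{
	const uint64_t sizes[] = { SZ_4K, SZ_64K, SZ_16M };
	uint64_t mask = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (query_page_size & (1u << i))
			mask |= sizes[i];
	return mask;
}

int main(void)
{
	/* e.g. firmware reporting 4K and 64K support: bits 0 and 1 set */
	printf("pgsizes = 0x%llx\n",
	       (unsigned long long)page_size_to_mask(0x3));
	return 0;
}
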
+ */ + if (is_kdump_kernel()) + return; + + if (table_group->max_dynamic_windows_supported != 0) + return; /* already initialized */ + + table_group->tce32_start = be64_to_cpu(prop.dma_base); + table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); + + if (!of_find_property(pdn, "ibm,dma-window", NULL)) + dev_err(&pdev->dev, "default dma window missing!\n"); + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + table_group->max_dynamic_windows_supported = -1; + return; + } + + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) { + dev_err(&pdev->dev, "%s: query_ddw failed\n", __func__); + table_group->max_dynamic_windows_supported = -1; + return; + } + + if (query.windows_available == 0) + table_group->max_dynamic_windows_supported = 1; + else + table_group->max_dynamic_windows_supported = IOMMU_TABLE_GROUP_MAX_TABLES; + + table_group->max_levels = 1; + table_group->pgsizes |= query_page_size_to_mask(query.page_size); +} + static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; @@ -1605,13 +1777,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) be32_to_cpu(prop.tce_shift), NULL, &iommu_table_lpar_multi_ops); - /* Only for normal boot with default window. Doesn't matter even - * if we set these with DDW which is 64bit during kdump, since - * these will not be used during kdump. - */ - pci->table_group->tce32_start = be64_to_cpu(prop.dma_base); - pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift); - iommu_init_table(tbl, pci->phb->node, 0, 0); iommu_register_group(pci->table_group, pci_domain_nr(pci->phb->bus), 0); @@ -1620,6 +1785,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug(" found DMA window, table: %p\n", pci->table_group); } + spapr_tce_init_table_group(dev, pdn, prop); + set_iommu_table_base(&dev->dev, pci->table_group->tables[0]); iommu_add_device(pci->table_group, &dev->dev); } @@ -1647,6 +1814,491 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) return false; } +#ifdef CONFIG_IOMMU_API +/* + * A simple iommu_table_group_ops which only allows reusing the existing + * iommu_table. This handles VFIO for POWER7 or the nested KVM. + * The ops does not allow creating windows and only allows reusing the existing + * one if it matches table_group->tce32_start/tce32_size/page_shift. + */ +static unsigned long spapr_tce_get_table_size(__u32 page_shift, + __u64 window_size, __u32 levels) +{ + unsigned long size; + + if (levels > 1) + return ~0U; + size = window_size >> (page_shift - 3); + return size; +} + +static struct pci_dev *iommu_group_get_first_pci_dev(struct iommu_group *group) +{ + struct pci_dev *pdev = NULL; + int ret; + + /* No IOMMU group ? */ + if (!group) + return NULL; + + ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table); + if (!ret || !pdev) + return NULL; + return pdev; +} + +static void restore_default_dma_window(struct pci_dev *pdev, struct device_node *pdn) +{ + reset_dma_window(pdev, pdn); + copy_property(pdn, "ibm,dma-window-saved", "ibm,dma-window"); +} + +static long remove_dynamic_dma_windows(struct pci_dev *pdev, struct device_node *pdn) +{ + struct pci_dn *pci = PCI_DN(pdn); + struct dma_win *window; + bool direct_mapping; + int len; + + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, &direct_mapping)) { + remove_dma_window_named(pdn, true, direct_mapping ? 
+ DIRECT64_PROPNAME : DMA64_PROPNAME, true); + if (!direct_mapping) { + WARN_ON(!pci->table_group->tables[0] && !pci->table_group->tables[1]); + + if (pci->table_group->tables[1]) { + iommu_tce_table_put(pci->table_group->tables[1]); + pci->table_group->tables[1] = NULL; + } else if (pci->table_group->tables[0]) { + /* Default window was removed and only the DDW exists */ + iommu_tce_table_put(pci->table_group->tables[0]); + pci->table_group->tables[0] = NULL; + } + } + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { + if (window->device == pdn) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&dma_win_list_lock); + } + + return 0; +} + +static long pseries_setup_default_iommu_config(struct iommu_table_group *table_group, + struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + const __be32 *default_prop; + long liobn, offset, size; + struct device_node *pdn; + struct iommu_table *tbl; + struct pci_dn *pci; + + pdn = pci_dma_find_parent_node(pdev, table_group); + if (!pdn || !PCI_DN(pdn)) { + dev_warn(&pdev->dev, "No table_group configured for the node %pOF\n", pdn); + return -1; + } + pci = PCI_DN(pdn); + + /* The default window is restored if not present already on removal of DDW. + * However, if used by VFIO SPAPR sub driver, the user's order of removal of + * windows might have been different to not leading to auto restoration, + * suppose the DDW was removed first followed by the default one. + * So, restore the default window with reset-pe-dma call explicitly. + */ + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + tbl = iommu_pseries_alloc_table(pci->phb->node); + if (!tbl) { + dev_err(&pdev->dev, "couldn't create new IOMMU table\n"); + return -1; + } + + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, offset, + size, IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, 0, 0); + + pci->table_group->tables[0] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + + return 0; +} + +static bool is_default_window_request(struct iommu_table_group *table_group, __u32 page_shift, + __u64 window_size) +{ + if ((window_size <= table_group->tce32_size) && + (page_shift == IOMMU_PAGE_SHIFT_4K)) + return true; + + return false; +} + +static long spapr_tce_create_table(struct iommu_table_group *table_group, int num, + __u32 page_shift, __u64 window_size, __u32 levels, + struct iommu_table **ptbl) +{ + struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group); + u32 ddw_avail[DDW_APPLICABLE_SIZE]; + struct ddw_create_response create; + unsigned long liobn, offset, size; + unsigned long start = 0, end = 0; + struct ddw_query_response query; + const __be32 *default_prop; + struct failed_ddw_pdn *fpdn; + unsigned int window_shift; + struct device_node *pdn; + struct iommu_table *tbl; + struct dma_win *window; + struct property *win64; + struct pci_dn *pci; + u64 win_addr; + int len, i; + long ret; + + if (!is_power_of_2(window_size) || levels > 1) + return -EINVAL; + + window_shift = order_base_2(window_size); + + mutex_lock(&dma_win_init_mutex); + + ret = -ENODEV; + + pdn = pci_dma_find_parent_node(pdev, table_group); + if (!pdn || !PCI_DN(pdn)) { /* Niether of 32s|64-bit exist! 
*/ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + goto out_failed; + } + pci = PCI_DN(pdn); + + /* If the enable DDW failed for the pdn, dont retry! */ + list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) { + if (fpdn->pdn == pdn) { + dev_info(&pdev->dev, "%pOF in failed DDW device list\n", pdn); + goto out_unlock; + } + } + + tbl = iommu_pseries_alloc_table(pci->phb->node); + if (!tbl) { + dev_dbg(&pdev->dev, "couldn't create new IOMMU table\n"); + goto out_unlock; + } + + if (num == 0) { + bool direct_mapping; + /* The request is not for default window? Ensure there is no DDW window already */ + if (!is_default_window_request(table_group, page_shift, window_size)) { + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, + &direct_mapping)) { + dev_warn(&pdev->dev, "%pOF: 64-bit window already present.", pdn); + ret = -EPERM; + goto out_unlock; + } + } else { + /* Request is for Default window, ensure there is no DDW if there is a + * need to reset. reset-pe otherwise removes the DDW also + */ + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + if (!default_prop) { + if (find_existing_ddw(pdn, &pdev->dev.archdata.dma_offset, &len, + &direct_mapping)) { + dev_warn(&pdev->dev, "%pOF: Attempt to create window#0 when 64-bit window is present. Preventing the attempt as that would destroy the 64-bit window", + pdn); + ret = -EPERM; + goto out_unlock; + } + + restore_default_dma_window(pdev, pdn); + + default_prop = of_get_property(pdn, "ibm,dma-window", NULL); + of_parse_dma_window(pdn, default_prop, &liobn, &offset, &size); + /* Limit the default window size to window_size */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, liobn, + offset, 1UL << window_shift, + IOMMU_PAGE_SHIFT_4K, NULL, + &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, start, end); + + table_group->tables[0] = tbl; + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + } + } + } + + ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", + &ddw_avail[0], DDW_APPLICABLE_SIZE); + if (ret) { + dev_info(&pdev->dev, "ibm,ddw-applicable not found\n"); + goto out_failed; + } + ret = -ENODEV; + + pr_err("%s: Calling query %pOF\n", __func__, pdn); + ret = query_ddw(pdev, ddw_avail, &query, pdn); + if (ret) + goto out_failed; + ret = -ENODEV; + + len = window_shift; + if (query.largest_available_block < (1ULL << (len - page_shift))) { + dev_dbg(&pdev->dev, "can't map window 0x%llx with %llu %llu-sized pages\n", + 1ULL << len, query.largest_available_block, + 1ULL << page_shift); + ret = -EINVAL; /* Retry with smaller window size */ + goto out_unlock; + } + + if (create_ddw(pdev, ddw_avail, &create, page_shift, len)) { + pr_err("%s: Create ddw failed %pOF\n", __func__, pdn); + goto out_failed; + } + + win_addr = ((u64)create.addr_hi << 32) | create.addr_lo; + win64 = ddw_property_create(DMA64_PROPNAME, create.liobn, win_addr, page_shift, len); + if (!win64) + goto remove_window; + + ret = of_add_property(pdn, win64); + if (ret) { + dev_err(&pdev->dev, "unable to add DMA window property for %pOF: %ld", pdn, ret); + goto free_property; + } + ret = -ENODEV; + + window = ddw_list_new_entry(pdn, win64->value); + if (!window) + goto remove_property; + + window->direct = false; + + for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) { + const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM; + + /* Look for MMIO32 */ + if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) { + start = pci->phb->mem_resources[i].start; + end = 
pci->phb->mem_resources[i].end; + break; + } + } + + /* New table for using DDW instead of the default DMA window */ + iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr, + 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); + iommu_init_table(tbl, pci->phb->node, start, end); + + pci->table_group->tables[num] = tbl; + set_iommu_table_base(&pdev->dev, tbl); + pdev->dev.archdata.dma_offset = win_addr; + + spin_lock(&dma_win_list_lock); + list_add(&window->list, &dma_win_list); + spin_unlock(&dma_win_list_lock); + + mutex_unlock(&dma_win_init_mutex); + + goto exit; + +remove_property: + of_remove_property(pdn, win64); +free_property: + kfree(win64->name); + kfree(win64->value); + kfree(win64); +remove_window: + __remove_dma_window(pdn, ddw_avail, create.liobn); + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +exit: + /* Allocate the userspace view */ + pseries_tce_iommu_userspace_view_alloc(tbl); + tbl->it_allocated_size = spapr_tce_get_table_size(page_shift, window_size, levels); + + *ptbl = iommu_tce_table_get(tbl); + + return 0; +} + +static bool is_default_window_table(struct iommu_table_group *table_group, struct iommu_table *tbl) +{ + if (((tbl->it_size << tbl->it_page_shift) <= table_group->tce32_size) && + (tbl->it_page_shift == IOMMU_PAGE_SHIFT_4K)) + return true; + + return false; +} + +static long spapr_tce_set_window(struct iommu_table_group *table_group, + int num, struct iommu_table *tbl) +{ + return tbl == table_group->tables[num] ? 0 : -EPERM; +} + +static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num) +{ + struct pci_dev *pdev = iommu_group_get_first_pci_dev(table_group->group); + struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; + struct iommu_table *tbl = table_group->tables[num]; + struct failed_ddw_pdn *fpdn; + struct dma_win *window; + const char *win_name; + int ret = -ENODEV; + + mutex_lock(&dma_win_init_mutex); + + if ((num == 0) && is_default_window_table(table_group, tbl)) + win_name = "ibm,dma-window"; + else + win_name = DMA64_PROPNAME; + + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) { /* Niether of 32s|64-bit exist! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + goto out_failed; + } + + /* Dont clear the TCEs, User should have done it */ + if (remove_dma_window_named(pdn, true, win_name, false)) { + pr_err("%s: The existing DDW removal failed for node %pOF\n", __func__, pdn); + goto out_failed; /* Could not remove it either! 
*/ + } + + if (strcmp(win_name, DMA64_PROPNAME) == 0) { + spin_lock(&dma_win_list_lock); + list_for_each_entry(window, &dma_win_list, list) { + if (window->device == pdn) { + list_del(&window->list); + kfree(window); + break; + } + } + spin_unlock(&dma_win_list_lock); + } + + iommu_tce_table_put(table_group->tables[num]); + table_group->tables[num] = NULL; + + ret = 0; + + goto out_unlock; + +out_failed: + fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL); + if (!fpdn) + goto out_unlock; + fpdn->pdn = pdn; + list_add(&fpdn->list, &failed_ddw_pdn_list); + +out_unlock: + mutex_unlock(&dma_win_init_mutex); + + return ret; +} + +static long spapr_tce_take_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + struct pci_dev *pdev = to_pci_dev(dev); + struct device_node *dn = pci_device_to_OF_node(pdev); + struct device_node *pdn; + + /* SRIOV VFs using direct map by the host driver OR multifunction devices + * where the ownership was taken on the attempt by the first function + */ + if (!tbl && (table_group->max_dynamic_windows_supported != 1)) + return 0; + + mutex_lock(&dma_win_init_mutex); + + pdn = pci_dma_find(dn, NULL); + if (!pdn || !PCI_DN(pdn)) { /* Niether of 32s|64-bit exist! */ + dev_warn(&pdev->dev, "No dma-windows exist for the node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* + * Though rtas call reset-pe removes the DDW, it doesn't clear the entries on the table + * if there are any. In case of direct map, the entries will be left over, which + * is fine for PEs with 2 DMA windows where the second window is created with create-pe + * at which point the table is cleared. However, on VFs having only one DMA window, the + * default window would end up seeing the entries left over from the direct map done + * on the second window. So, remove the ddw explicitly so that clean_dma_window() + * cleans up the entries if any. + */ + if (remove_dynamic_dma_windows(pdev, pdn)) { + dev_warn(&pdev->dev, "The existing DDW removal failed for node %pOF\n", pdn); + mutex_unlock(&dma_win_init_mutex); + return -1; + } + + /* The table_group->tables[0] is not null now, it must be the default window + * Remove it, let the userspace create it as it needs. + */ + if (table_group->tables[0]) { + remove_dma_window_named(pdn, true, "ibm,dma-window", true); + iommu_tce_table_put(tbl); + table_group->tables[0] = NULL; + } + set_iommu_table_base(dev, NULL); + + mutex_unlock(&dma_win_init_mutex); + + return 0; +} + +static void spapr_tce_release_ownership(struct iommu_table_group *table_group, struct device *dev) +{ + struct iommu_table *tbl = table_group->tables[0]; + + if (tbl) { /* Default window already restored */ + return; + } + + mutex_lock(&dma_win_init_mutex); + + /* Restore the default window */ + pseries_setup_default_iommu_config(table_group, dev); + + mutex_unlock(&dma_win_init_mutex); + + return; +} + +static struct iommu_table_group_ops spapr_tce_table_group_ops = { + .get_table_size = spapr_tce_get_table_size, + .create_table = spapr_tce_create_table, + .set_window = spapr_tce_set_window, + .unset_window = spapr_tce_unset_window, + .take_ownership = spapr_tce_take_ownership, + .release_ownership = spapr_tce_release_ownership, +}; +#endif + static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -1715,8 +2367,8 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti * we have to remove the property when releasing * the device node. 
*/ - if (remove_ddw(np, false, DIRECT64_PROPNAME)) - remove_ddw(np, false, DMA64_PROPNAME); + if (remove_dma_window_named(np, false, DIRECT64_PROPNAME, true)) + remove_dma_window_named(np, false, DMA64_PROPNAME, true); if (pci && pci->table_group) iommu_pseries_free_group(pci->table_group, diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 98096bbfd62e8d503a3bcf2029dfdf06eed8da18..c0d10c149661cbec62733306f9e81c4ce0341d67 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -243,9 +242,6 @@ static void __init allocate_dart(void) if (!dart_tablebase) panic("Failed to allocate 16MB below 2GB for DART table\n"); - /* There is no point scanning the DART space for leaks*/ - kmemleak_no_scan((void *)dart_tablebase); - /* Allocate a spare page to map all invalid DART pages. We need to do * that to work around what looks like a problem with the HT bridge * prefetching into invalid pages and corrupting data diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index 32031a7d96d4f6396f47043c786c8a3997bac12f..cf19010b117234abcec02705b2bd6dbf077daf1f 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -129,8 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) ALT_CMO_OP_VPA(flush, flush_addr, paddr, size, riscv_cbom_block_size); } -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN, TAINT_CPU_OUT_OF_SPEC, diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 7a2ed154a5e1f98c4876686babf251756b6153b2..5036f13ab69f632f7bed3a1449572b26feb5335c 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -50,6 +50,13 @@ static inline struct irq_domain *arch_get_ir_parent_domain(void) return x86_vector_domain; } +extern bool enable_posted_msi; + +static inline bool posted_msi_supported(void) +{ + return enable_posted_msi && irq_remapping_cap(IRQ_POSTING_CAP); +} + #else /* CONFIG_IRQ_REMAP */ static inline bool irq_remapping_cap(enum irq_remap_cap cap) { return 0; } diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 2b433325ca8fc31517455af10e40a31c12dfb220..c25d40cbbdbe0ed4264d34d14d3cf493fdd92c57 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -159,7 +159,6 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o - obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index b5f5e0916894406261efd7312efb607ec7ab29cb..ddb857d94ed91e3f6518b376ccc46535ada7f302 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -696,7 +696,6 @@ static void quirk_zhaoxin_dma_patch(int num, int slot, int func) revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); if (revision == 0x10) { is_zhaoxin_kh40000 = true; - dma_ops = &kh40000_dma_direct_ops; pr_info("zhaoxin direct dma patch enabled\n"); } } diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c deleted file mode 100644 index e8dd3bd43e72f39c722d378bdcff44c7062832d4..0000000000000000000000000000000000000000 --- a/arch/x86/kernel/zhaoxin_kh40000.c +++ /dev/null 
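
A minimal stand-alone sketch of the gate that posted_msi_supported() above expresses: MSIs are delivered as posted interrupts only when both the intremap=posted_msi opt-in documented earlier in this series and the hardware interrupt-posting capability are present. parse_intremap(), irq_posting_cap and main() below are illustrative stand-ins, not the kernel's implementation.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool enable_posted_msi;	/* would be set by "intremap=posted_msi" */
static bool irq_posting_cap;	/* would be reported by the IOMMU */

static void parse_intremap(const char *arg)
{
	if (strcmp(arg, "posted_msi") == 0)
		enable_posted_msi = true;
}

static bool posted_msi_supported(void)
{
	/* Both the command-line opt-in and the capability are required. */
	return enable_posted_msi && irq_posting_cap;
}

int main(void)
{
	parse_intremap("posted_msi");
	irq_posting_cap = true;	/* pretend the IOMMU advertises posting */

	printf("MSIs delivered as posted interrupts: %s\n",
	       posted_msi_supported() ? "yes" : "no");
	return 0;
}
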
@@ -1,351 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "../../../kernel/dma/direct.h" - -/*** - * usage: - * set "zhaoxin_patch_bitmask=" in cmdline - * value description: - * bit 0: enable(1) node check or not(0). default 1 - */ -enum { - ZHAOXIN_P2CW_NODE_CHECK = BIT(0), - ZHAOXIN_PATCH_CODE_MAX = ZHAOXIN_P2CW_NODE_CHECK, -}; - -#define ZHAOXIN_PATCH_CODE_DEFAULT ZHAOXIN_P2CW_NODE_CHECK - -unsigned long zhaoxin_patch_code = ZHAOXIN_PATCH_CODE_DEFAULT; - -static int __init zhaoxin_patch_code_setup(char *str) -{ - int err = kstrtoul(str, 0, &zhaoxin_patch_code); - - if (err || (zhaoxin_patch_code > ZHAOXIN_PATCH_CODE_MAX)) { - pr_err("cmdline 'zhaoxin_patch_bitmask=%s' inappropriate\n", - str); - return err; - } - - if (ZHAOXIN_P2CW_NODE_CHECK | zhaoxin_patch_code) - pr_info("zhaoxin dma patch node check is enabled\n"); - - return 0; -} -__setup("zhaoxin_patch_bitmask=", zhaoxin_patch_code_setup); - -static struct pci_dev *kh40000_get_pci_dev(struct device *dev) -{ - if (dev_is_pci(dev)) - return to_pci_dev(dev); - - if (dev->parent) - return kh40000_get_pci_dev(dev->parent); - - return NULL; -} - -static void kh40000_sync_single_dma_for_cpu(struct device *dev, dma_addr_t paddr, - enum dma_data_direction dir, bool is_iommu) -{ - u8 vid; - struct pci_dev *pci; - u64 dma_mask = *dev->dma_mask; - - /* check direction */ - if ((dir != DMA_FROM_DEVICE) && (dir != DMA_BIDIRECTIONAL)) - return; - - /* check dma capability */ - if (dma_mask <= DMA_BIT_MASK(32)) - return; - - /* check device type */ - pci = kh40000_get_pci_dev(dev); - if (pci == NULL) - return; - - /* get real physical address */ - if (is_iommu) { - struct iommu_domain *domain = iommu_get_dma_domain(dev); - - paddr = iommu_iova_to_phys(domain, paddr); - if (!paddr) - return; - } - - /* check node or not */ - if ((zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK) - && pfn_to_nid(PFN_DOWN(paddr)) == dev_to_node(dev)) - return; - - /* flush data by one pci read cycle */ - pci_read_config_byte(pci, PCI_VENDOR_ID, &vid); -} - -/* zhaoxin kh-40000 direct dma ops */ -static void *kh40000_dma_direct_alloc(struct device *dev, size_t size, - dma_addr_t *addr, gfp_t gfp, unsigned long attrs) -{ - if (dev->coherent_dma_mask > DMA_BIT_MASK(32)) - gfp |= __GFP_THISNODE; - - return dma_direct_alloc(dev, size, addr, gfp, attrs); -} - -static void kh40000_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); - dma_direct_unmap_page(dev, addr, size, dir, attrs); -} - -static void kh40000_dma_direct_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); - - dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); -} - -static void kh40000_dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); - dma_direct_sync_single_for_cpu(dev, addr, size, dir); -} - -static void kh40000_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - 
kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); - - dma_direct_unmap_sg(dev, sgl, nents, dir, attrs); -} - -static void kh40000_dma_direct_unmap_resource(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); -} - -const struct dma_map_ops kh40000_dma_direct_ops = { - .flags = DMA_F_PCI_P2PDMA_SUPPORTED, - .alloc = kh40000_dma_direct_alloc, - .sync_sg_for_cpu = kh40000_dma_direct_sync_sg_for_cpu, - .unmap_page = kh40000_dma_direct_unmap_page, - .sync_single_for_cpu = kh40000_dma_direct_sync_single_for_cpu, - .unmap_sg = kh40000_dma_direct_unmap_sg, - .unmap_resource = kh40000_dma_direct_unmap_resource, - .dma_supported = dma_direct_supported, - .free = dma_direct_free, - .alloc_pages = dma_direct_alloc_pages, - .free_pages = dma_direct_free_pages, - .sync_single_for_device = dma_direct_sync_single_for_device, - .sync_sg_for_device = dma_direct_sync_sg_for_device, - .get_required_mask = dma_direct_get_required_mask, - .max_mapping_size = dma_direct_max_mapping_size, - .mmap = dma_direct_mmap, - .get_sgtable = dma_direct_get_sgtable, - .map_page = dma_direct_map_page, - .map_sg = dma_direct_map_sg, - .map_resource = dma_direct_map_resource, -}; - -/* zhaoxin kh-40000 iommu dma ops */ -static const struct dma_map_ops *iommu_dma_ops; - -static void *kh40000_iommu_dma_alloc(struct device *dev, size_t size, - dma_addr_t *addr, gfp_t gfp, unsigned long attrs) -{ - gfp |= __GFP_THISNODE; - - return iommu_dma_ops->alloc(dev, size, addr, gfp, attrs); -} - -static void kh40000_iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs) -{ - iommu_dma_ops->free(dev, size, cpu_addr, handle, attrs); -} - -static struct page *kh40000_dma_common_alloc_pages(struct device *dev, size_t size, - dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) -{ - return iommu_dma_ops->alloc_pages(dev, size, dma_handle, dir, gfp); -} - -static void kh40000_dma_common_free_pages(struct device *dev, size_t size, struct page *page, - dma_addr_t dma_handle, enum dma_data_direction dir) -{ - iommu_dma_ops->free_pages(dev, size, page, dma_handle, dir); -} - -static struct sg_table *kh40000_iommu_dma_alloc_noncontiguous(struct device *dev, - size_t size, enum dma_data_direction dir, gfp_t gfp, - unsigned long attrs) -{ - return iommu_dma_ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); -} - -static void kh40000_iommu_dma_free_noncontiguous(struct device *dev, size_t size, - struct sg_table *sgt, enum dma_data_direction dir) -{ - return iommu_dma_ops->free_noncontiguous(dev, size, sgt, dir); -} - -static int kh40000_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return iommu_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); -} - -static void kh40000_iommu_dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); - iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs); -} - -static int kh40000_iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return iommu_dma_ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); -} - -static dma_addr_t kh40000_iommu_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t 
size, enum dma_data_direction dir, - unsigned long attrs) -{ - return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs); -} - -static int kh40000_iommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs); -} - -static void kh40000_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nelems, i) - kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); - iommu_dma_ops->unmap_sg(dev, sgl, nelems, dir, attrs); -} - -static void kh40000_iommu_dma_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); - iommu_dma_ops->sync_single_for_cpu(dev, addr, size, dir); -} - -static void kh40000_iommu_dma_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - iommu_dma_ops->sync_single_for_device(dev, addr, size, dir); -} - -static void kh40000_iommu_dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nelems, i) - kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); - iommu_dma_ops->sync_sg_for_cpu(dev, sgl, nelems, dir); -} - -static void kh40000_iommu_dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) -{ - iommu_dma_ops->sync_sg_for_device(dev, sgl, nelems, dir); -} - -static dma_addr_t kh40000_iommu_dma_map_resource(struct device *dev, phys_addr_t phys, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - return iommu_dma_ops->map_resource(dev, phys, size, dir, attrs); -} - -static void kh40000_iommu_dma_unmap_resource(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); - iommu_dma_ops->unmap_resource(dev, addr, size, dir, attrs); -} - -static unsigned long kh40000_iommu_dma_get_merge_boundary(struct device *dev) -{ - return iommu_dma_ops->get_merge_boundary(dev); -} - -static size_t kh40000_iommu_dma_opt_mapping_size(void) -{ - return iommu_dma_ops->opt_mapping_size(); -} - -const struct dma_map_ops kh40000_dma_iommu_ops = { - .flags = DMA_F_PCI_P2PDMA_SUPPORTED, - .alloc = kh40000_iommu_dma_alloc, - .free = kh40000_iommu_dma_free, - .unmap_page = kh40000_iommu_dma_unmap_page, - .alloc_pages = kh40000_dma_common_alloc_pages, - .free_pages = kh40000_dma_common_free_pages, - .alloc_noncontiguous = kh40000_iommu_dma_alloc_noncontiguous, - .free_noncontiguous = kh40000_iommu_dma_free_noncontiguous, - .mmap = kh40000_iommu_dma_mmap, - .get_sgtable = kh40000_iommu_dma_get_sgtable, - .map_page = kh40000_iommu_dma_map_page, - .map_sg = kh40000_iommu_dma_map_sg, - .unmap_sg = kh40000_iommu_dma_unmap_sg, - .sync_single_for_cpu = kh40000_iommu_dma_sync_single_for_cpu, - .sync_single_for_device = kh40000_iommu_dma_sync_single_for_device, - .sync_sg_for_cpu = kh40000_iommu_dma_sync_sg_for_cpu, - .sync_sg_for_device = kh40000_iommu_dma_sync_sg_for_device, - .map_resource = kh40000_iommu_dma_map_resource, - .unmap_resource = kh40000_iommu_dma_unmap_resource, - .get_merge_boundary = kh40000_iommu_dma_get_merge_boundary, - .opt_mapping_size = 
kh40000_iommu_dma_opt_mapping_size, -}; - -void kh40000_set_iommu_dma_ops(struct device *dev) -{ - if (dev->dma_ops) { - iommu_dma_ops = dev->dma_ops; - set_dma_ops(dev, &kh40000_dma_iommu_ops); - pr_info_once("zhaoxin iommu dma patch enabled\n"); - } -} diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c index 93d796531af307d59965e7bf62a2d70a1fa13a75..52b2abf88689824bad7d08ae83515cb5b2ee1502 100644 --- a/drivers/acpi/arm64/dma.c +++ b/drivers/acpi/arm64/dma.c @@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev) { int ret; u64 end, mask; - u64 size = 0; const struct bus_dma_region *map = NULL; /* @@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev) } if (dev->coherent_dma_mask) - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); + end = dev->coherent_dma_mask; else - size = 1ULL << 32; + end = (1ULL << 32) - 1; ret = acpi_dma_get_range(dev, &map); if (!ret && map) { - const struct bus_dma_region *r = map; - - for (end = 0; r->size; r++) { - if (r->dma_start + r->size - 1 > end) - end = r->dma_start + r->size - 1; - } - - size = end + 1; + end = dma_range_map_max(map); dev->dma_range_map = map; } if (ret == -ENODEV) - ret = iort_dma_get_ranges(dev, &size); + ret = iort_dma_get_ranges(dev, &end); if (!ret) { /* * Limit coherent and dma mask based on size retrieved from * firmware. */ - end = size - 1; mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 1a31106a14e446c78a546d1fc8342de8ffdbaf58..ab91b4f591c872fa1276ed9d85d20d1b54308c6c 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1221,10 +1221,10 @@ static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node) static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, u32 streamid) { - const struct iommu_ops *ops; struct fwnode_handle *iort_fwnode; - if (!node) + /* If there's no SMMU driver at all, give up now */ + if (!node || !iort_iommu_driver_enabled(node->type)) return -ENODEV; iort_fwnode = iort_get_fwnode(node); @@ -1232,19 +1232,10 @@ static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, return -ENODEV; /* - * If the ops look-up fails, this means that either - * the SMMU drivers have not been probed yet or that - * the SMMU drivers are not built in the kernel; - * Depending on whether the SMMU drivers are built-in - * in the kernel or not, defer the IOMMU configuration - * or just abort it. + * If the SMMU drivers are enabled but not loaded/probed + * yet, this will defer. */ - ops = iommu_ops_from_fwnode(iort_fwnode); - if (!ops) - return iort_iommu_driver_enabled(node->type) ? - -EPROBE_DEFER : -ENODEV; - - return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops); + return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode); } struct iort_pci_alias_info { @@ -1367,7 +1358,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) { return -ENODEV; } #endif -static int nc_dma_get_range(struct device *dev, u64 *size) +static int nc_dma_get_range(struct device *dev, u64 *limit) { struct acpi_iort_node *node; struct acpi_iort_named_component *ncomp; @@ -1384,13 +1375,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } - *size = ncomp->memory_address_limit >= 64 ? U64_MAX : - 1ULL<<ncomp->memory_address_limit; + *limit = ncomp->memory_address_limit >= 64 ?
U64_MAX : + (1ULL << ncomp->memory_address_limit) - 1; return 0; } -static int rc_dma_get_range(struct device *dev, u64 *size) +static int rc_dma_get_range(struct device *dev, u64 *limit) { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; @@ -1408,8 +1399,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } - *size = rc->memory_address_limit >= 64 ? U64_MAX : - 1ULL<<rc->memory_address_limit; + *limit = rc->memory_address_limit >= 64 ? U64_MAX : + (1ULL << rc->memory_address_limit) - 1; return 0; } @@ -1417,16 +1408,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) /** * iort_dma_get_ranges() - Look up DMA addressing limit for the device * @dev: device to lookup - * @size: DMA range size result pointer + * @limit: DMA limit result pointer * * Return: 0 on success, an error otherwise. */ -int iort_dma_get_ranges(struct device *dev, u64 *size) +int iort_dma_get_ranges(struct device *dev, u64 *limit) { if (dev_is_pci(dev)) - return rc_dma_get_range(dev, size); + return rc_dma_get_range(dev, limit); else - return nc_dma_get_range(dev, size); + return nc_dma_get_range(dev, limit); } static void __init acpi_iort_register_irq(int hwirq, const char *name, diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index a89bdbe0018442f0c7d57dc6d920f078c4078128..c6e95bb8cccac5af08ccba40aa947dab802bb5df 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -88,43 +88,29 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event, enum dock_callback_type cb_type) { struct acpi_device *adev = dd->adev; + acpi_hp_fixup fixup = NULL; + acpi_hp_uevent uevent = NULL; + acpi_hp_notify notify = NULL; acpi_lock_hp_context(); - if (!adev->hp) - goto out; - - if (cb_type == DOCK_CALL_FIXUP) { - void (*fixup)(struct acpi_device *); - - fixup = adev->hp->fixup; - if (fixup) { - acpi_unlock_hp_context(); - fixup(adev); - return; - } - } else if (cb_type == DOCK_CALL_UEVENT) { - void (*uevent)(struct acpi_device *, u32); - - uevent = adev->hp->uevent; - if (uevent) { - acpi_unlock_hp_context(); - uevent(adev, event); - return; - } - } else { - int (*notify)(struct acpi_device *, u32); - - notify = adev->hp->notify; - if (notify) { - acpi_unlock_hp_context(); - notify(adev, event); - return; - } + if (adev->hp) { + if (cb_type == DOCK_CALL_FIXUP) + fixup = adev->hp->fixup; + else if (cb_type == DOCK_CALL_UEVENT) + uevent = adev->hp->uevent; + else + notify = adev->hp->notify; } - out: acpi_unlock_hp_context(); + + if (fixup) + fixup(adev); + else if (uevent) + uevent(adev, event); + else if (notify) + notify(adev, event); } static struct dock_station *find_dock_station(acpi_handle handle) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index ff7078afb9d50ec5ff86bea9c3528dbbeaeb8c7a..be25e7015523d113279a50ae428831fae179cf65 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -73,8 +73,7 @@ void acpi_unlock_hp_context(void) void acpi_initialize_hp_context(struct acpi_device *adev, struct acpi_hotplug_context *hp, - int (*notify)(struct acpi_device *, u32), - void (*uevent)(struct acpi_device *, u32)) + acpi_hp_notify notify, acpi_hp_uevent uevent) { acpi_lock_hp_context(); hp->notify = notify; @@ -390,7 +389,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) } else if (adev->flags.hotplug_notify) { error = acpi_generic_hotplug_event(adev, src); } else { - int (*notify)(struct acpi_device *, u32); + acpi_hp_notify notify; acpi_lock_hp_context(); notify = adev->hp ?
adev->hp->notify : NULL; @@ -1262,10 +1261,10 @@ const char *acpi_device_hid(struct acpi_device *device) { struct acpi_hardware_id *hid; - if (list_empty(&device->pnp.ids)) + hid = list_first_entry_or_null(&device->pnp.ids, struct acpi_hardware_id, list); + if (!hid) return dummy_hid; - hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list); return hid->id; } EXPORT_SYMBOL(acpi_device_hid); @@ -1543,37 +1542,25 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) #ifdef CONFIG_IOMMU_API int acpi_iommu_fwspec_init(struct device *dev, u32 id, - struct fwnode_handle *fwnode, - const struct iommu_ops *ops) + struct fwnode_handle *fwnode) { - int ret = iommu_fwspec_init(dev, fwnode, ops); - - if (!ret) - ret = iommu_fwspec_add_ids(dev, &id, 1); - - return ret; -} + int ret; -static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev) -{ - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + ret = iommu_fwspec_init(dev, fwnode); + if (ret) + return ret; - return fwspec ? fwspec->ops : NULL; + return iommu_fwspec_add_ids(dev, &id, 1); } static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in) { int err; - const struct iommu_ops *ops; /* Serialise to make dev->iommu stable under our potential fwspec */ mutex_lock(&iommu_probe_device_lock); - /* - * If we already translated the fwspec there is nothing left to do, - * return the iommu_ops. - */ - ops = acpi_iommu_fwspec_ops(dev); - if (ops) { + /* If we already translated the fwspec there is nothing left to do */ + if (dev_iommu_fwspec_get(dev)) { mutex_unlock(&iommu_probe_device_lock); return 0; } @@ -1590,23 +1577,13 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in) if (!err && dev->bus) err = iommu_probe_device(dev); - /* Ignore all other errors apart from EPROBE_DEFER */ - if (err == -EPROBE_DEFER) { - return err; - } else if (err) { - dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); - return -ENODEV; - } - if (!acpi_iommu_fwspec_ops(dev)) - return -ENODEV; - return 0; + return err; } #else /* !CONFIG_IOMMU_API */ int acpi_iommu_fwspec_init(struct device *dev, u32 id, - struct fwnode_handle *fwnode, - const struct iommu_ops *ops) + struct fwnode_handle *fwnode) { return -ENODEV; } @@ -1636,16 +1613,14 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, acpi_arch_dma_setup(dev); + /* Ignore all other errors apart from EPROBE_DEFER */ ret = acpi_iommu_configure_id(dev, input_id); if (ret == -EPROBE_DEFER) return -EPROBE_DEFER; + if (ret) + dev_dbg(dev, "Adding to IOMMU failed: %d\n", ret); - /* - * Historically this routine doesn't fail driver probing due to errors - * in acpi_iommu_configure_id() - */ - - arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT); return 0; } diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c index c8025921c129b22cf5e4ce49217c178d8b68bd40..2aa69a2fba73b64816ca4b3b86b88de6ea025911 100644 --- a/drivers/acpi/viot.c +++ b/drivers/acpi/viot.c @@ -307,21 +307,14 @@ void __init acpi_viot_init(void) static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu, u32 epid) { - const struct iommu_ops *ops; - - if (!viommu) + if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU)) return -ENODEV; /* We're not translating ourself */ if (device_match_fwnode(dev, viommu->fwnode)) return -EINVAL; - ops = iommu_ops_from_fwnode(viommu->fwnode); - if (!ops) - return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ? 
- -EPROBE_DEFER : -ENODEV; - - return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops); + return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode); } static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data) diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 25c55766021504951427c5d9e89e19a3102aa821..20136d00c098b624d73dfbc7596eaabb98743fa9 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -662,7 +662,7 @@ static int idxd_enable_system_pasid(struct idxd_device *idxd) * DMA domain is owned by the driver, it should support all valid * types such as DMA-FQ, identity, etc. */ - ret = iommu_attach_device_pasid(domain, dev, pasid); + ret = iommu_attach_device_pasid(domain, dev, pasid, NULL); if (ret) { dev_err(dev, "failed to attach device pasid %d, domain type %d", pasid, domain->type); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index d5512037c38bcd7ca807aaf281dceaebdd688a4e..2a94e82316f95c5f9dcc37ef0a4664a29e3492b2 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -407,10 +407,13 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks) struct msm_iommu *iommu; int ret; - domain = iommu_domain_alloc(dev->bus); - if (!domain) + if (!device_iommu_mapped(dev)) return NULL; + domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(domain)) + return ERR_CAST(domain); + iommu_set_pgtable_quirks(domain, quirks); iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 7c6699aed7d2ae48401f6614203a95062d8ebbb7..5b4d41ed8e95c1bbcfff3948871a9e1becdf5672 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -379,9 +379,10 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) if (err < 0) goto put_group; - host->domain = iommu_domain_alloc(&platform_bus_type); - if (!host->domain) { - err = -ENOMEM; + host->domain = iommu_paging_domain_alloc(host->dev); + if (IS_ERR(host->domain)) { + err = PTR_ERR(host->domain); + host->domain = NULL; goto put_cache; } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index fd938b6dfa7ed4f405861a6be38d7e8880f0bae2..5561714313d7c7a6d7a434609aae311e8b69a785 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -485,11 +485,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap); void hv_setup_dma_ops(struct device *dev, bool coherent) { - /* - * Hyper-V does not offer a vIOMMU in the guest - * VM, so pass 0/NULL for the IOMMU settings - */ - arch_setup_dma_ops(dev, 0, 0, coherent); + arch_setup_dma_ops(dev, coherent); } EXPORT_SYMBOL_GPL(hv_setup_dma_ops); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 9cb6193b3aaf5a78a6c54f04db3607f24ab81405..2d92a58c76e2950ac6f0da7047adb80ea0b2cebb 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -377,7 +377,7 @@ config ARM_SMMU_QCOM config ARM_SMMU_QCOM_DEBUG bool "ARM SMMU QCOM implementation defined debug support" - depends on ARM_SMMU_QCOM + depends on ARM_SMMU_QCOM=y help Support for implementation specific debug features in ARM SMMU hardware found in QTI platforms. This include support for @@ -395,6 +395,7 @@ config ARM_SMMU_V3 select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select GENERIC_MSI_IRQ + select IOMMUFD_DRIVER if IOMMUFD help Support for implementations of the ARM System MMU architecture version 3 providing translation support to a PCIe root complex. 
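[Editor's note, not part of the patch] The msm_iommu.c and host1x hunks above both apply the same conversion: the bus-based iommu_domain_alloc(), which returned NULL on failure, is replaced by the device-based iommu_paging_domain_alloc(), which reports failure as an ERR_PTR() and is only meaningful for a device that actually sits behind an IOMMU. Below is a minimal sketch of the resulting calling pattern; the example_* function names are hypothetical, and the early-return policy for non-IOMMU devices is a per-driver choice (the msm hunk, for instance, returns NULL in that case).

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

static struct iommu_domain *example_alloc_paging_domain(struct device *dev)
{
	/* The new helper is per-device, so first check that the device
	 * actually sits behind an IOMMU (one possible policy). */
	if (!device_iommu_mapped(dev))
		return ERR_PTR(-ENODEV);

	/* Unlike iommu_domain_alloc(bus), failure comes back as an
	 * ERR_PTR() value rather than NULL. */
	return iommu_paging_domain_alloc(dev);
}

/* Caller side: test with IS_ERR() instead of comparing against NULL. */
static int example_use_domain(struct device *dev)
{
	struct iommu_domain *domain = example_alloc_paging_domain(dev);

	if (IS_ERR(domain))
		return PTR_ERR(domain);

	/* ... attach/map as usual, then release the domain ... */
	iommu_domain_free(domain);
	return 0;
}

This is exactly the shape of the changes to msm_iommu_new() and host1x_iommu_attach() above: the NULL checks become IS_ERR()/PTR_ERR()/ERR_CAST() handling of the returned error pointer.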
@@ -402,9 +403,9 @@ config ARM_SMMU_V3 Say Y here if your system includes an IOMMU device implementing the ARM SMMUv3 architecture. +if ARM_SMMU_V3 config ARM_SMMU_V3_SVA bool "Shared Virtual Addressing support for the ARM SMMUv3" - depends on ARM_SMMU_V3 select IOMMU_SVA select IOMMU_IOPF select MMU_NOTIFIER @@ -415,6 +416,17 @@ config ARM_SMMU_V3_SVA Say Y here if your system supports SVA extensions such as PCIe PASID and PRI. +config ARM_SMMU_V3_KUNIT_TEST + tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on ARM_SMMU_V3_SVA + default KUNIT_ALL_TESTS + help + Enable this option to unit-test arm-smmu-v3 driver functions. + + If unsure, say N. +endif + config S390_IOMMU def_bool y if S390 && PCI depends on S390 && PCI diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig index 443b2c13c37b5979ae574c6c7ab13ac2ce6b60f1..994063e5586f0f74dfed1e97f46d7c17163a0a24 100644 --- a/drivers/iommu/amd/Kconfig +++ b/drivers/iommu/amd/Kconfig @@ -7,9 +7,12 @@ config AMD_IOMMU select PCI_ATS select PCI_PRI select PCI_PASID + select MMU_NOTIFIER select IOMMU_API select IOMMU_IOVA select IOMMU_IO_PGTABLE + select IOMMU_SVA + select IOMMU_IOPF select IOMMUFD_DRIVER if IOMMUFD depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE help diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile index f454fbb1569eb9d792338178faf9611ccb870635..9de33b2d42f526cc713644defc3fdfbdb0b841c0 100644 --- a/drivers/iommu/amd/Makefile +++ b/drivers/iommu/amd/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o +obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 325f63923257e98d7f0178b02d55770c6f7e8d03..4abfab24585d2c06f4bc36e253a02583cca76c14 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -17,10 +17,16 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data); irqreturn_t amd_iommu_int_thread_galog(int irq, void *data); irqreturn_t amd_iommu_int_handler(int irq, void *data); void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); +void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, + u8 cntrl_intr, u8 cntrl_log, + u32 status_run_mask, u32 status_overflow_mask); void amd_iommu_restart_event_logging(struct amd_iommu *iommu); void amd_iommu_restart_ga_log(struct amd_iommu *iommu); void amd_iommu_restart_ppr_log(struct amd_iommu *iommu); void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); +void iommu_feature_enable(struct amd_iommu *iommu, u8 bit); +void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, + gfp_t gfp, size_t size); #ifdef CONFIG_AMD_IOMMU_DEBUGFS void amd_iommu_debugfs_setup(struct amd_iommu *iommu); @@ -38,17 +44,42 @@ extern int amd_iommu_guest_ir; extern enum io_pgtable_fmt amd_iommu_pgtable; extern int amd_iommu_gpt_level; -bool amd_iommu_v2_supported(void); - -/* Device capabilities */ -int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev); -void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev); +/* Protection domain ops */ +struct protection_domain *protection_domain_alloc(unsigned int type); +void protection_domain_free(struct protection_domain *domain); +struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev, + struct mm_struct *mm); +void 
amd_iommu_domain_free(struct iommu_domain *dom); +int iommu_sva_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid); +void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain); + +/* SVA/PASID */ +bool amd_iommu_pasid_supported(void); + +/* IOPF */ +int amd_iommu_iopf_init(struct amd_iommu *iommu); +void amd_iommu_iopf_uninit(struct amd_iommu *iommu); +void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *resp); +int amd_iommu_iopf_add_device(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data); +void amd_iommu_iopf_remove_device(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data); /* GCR3 setup */ int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid, unsigned long gcr3); int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid); +/* PPR */ +int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu); +void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu); +void amd_iommu_enable_ppr_log(struct amd_iommu *iommu); +void amd_iommu_poll_ppr_log(struct amd_iommu *iommu); +int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag); + /* * This function flushes all internal caches of * the IOMMU used by this driver. @@ -56,6 +87,7 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid); void amd_iommu_flush_all_caches(struct amd_iommu *iommu); void amd_iommu_update_and_flush_device_table(struct protection_domain *domain); void amd_iommu_domain_update(struct protection_domain *domain); +void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set); void amd_iommu_domain_flush_complete(struct protection_domain *domain); void amd_iommu_domain_flush_pages(struct protection_domain *domain, u64 address, size_t size); @@ -73,9 +105,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) } #endif -int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid, - int status, int tag); - static inline bool is_rd890_iommu(struct pci_dev *pdev) { return (pdev->vendor == PCI_VENDOR_ID_ATI) && @@ -100,7 +129,8 @@ static inline int check_feature_gpt_level(void) static inline bool amd_iommu_gt_ppr_supported(void) { return (check_feature(FEATURE_GT) && - check_feature(FEATURE_PPR)); + check_feature(FEATURE_PPR) && + check_feature(FEATURE_EPHSUP)); } static inline u64 iommu_virt_to_phys(void *vaddr) @@ -149,6 +179,11 @@ static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_dat return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu); } +static inline struct protection_domain *to_pdomain(struct iommu_domain *dom) +{ + return container_of(dom, struct protection_domain, domain); +} + bool translation_pre_enabled(struct amd_iommu *iommu); bool amd_iommu_is_attach_deferred(struct device *dev); int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index d1fed5fc219b344fda4efd22b38b7b01ebebd84c..2b76b5dedc1d9bd324c9abd69451b3cafa280378 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -8,7 +8,9 @@ #ifndef _ASM_X86_AMD_IOMMU_TYPES_H #define _ASM_X86_AMD_IOMMU_TYPES_H +#include #include +#include #include #include #include @@ -251,6 +253,14 @@ #define PPR_ENTRY_SIZE 16 #define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES) +/* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */ +#define PPR_FLAG_EXEC 
0x002 /* Execute permission requested */ +#define PPR_FLAG_READ 0x004 /* Read permission requested */ +#define PPR_FLAG_WRITE 0x020 /* Write permission requested */ +#define PPR_FLAG_US 0x040 /* 1: User, 0: Supervisor */ +#define PPR_FLAG_RVSD 0x080 /* Reserved bit not zero */ +#define PPR_FLAG_GN 0x100 /* GVA and PASID is valid */ + #define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL) #define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL) #define PPR_DEVID(x) ((x) & 0xffffULL) @@ -503,6 +513,11 @@ extern struct kmem_cache *amd_iommu_irq_cache; list_for_each_entry((iommu), &amd_iommu_list, list) #define for_each_iommu_safe(iommu, next) \ list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list) +/* Making iterating over protection_domain->dev_data_list easier */ +#define for_each_pdom_dev_data(pdom_dev_data, pdom) \ + list_for_each_entry(pdom_dev_data, &pdom->dev_data_list, list) +#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \ + list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list) struct amd_iommu; struct iommu_domain; @@ -544,6 +559,16 @@ enum protection_domain_mode { PD_MODE_V2, }; +/* Track dev_data/PASID list for the protection domain */ +struct pdom_dev_data { + /* Points to attached device data */ + struct iommu_dev_data *dev_data; + /* PASID attached to the protection domain */ + ioasid_t pasid; + /* For protection_domain->dev_data_list */ + struct list_head list; +}; + /* * This structure contains generic data for IOMMU protection domains * independent of their use. @@ -560,6 +585,9 @@ struct protection_domain { bool dirty_tracking; /* dirty tracking is enabled in the domain */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ + + struct mmu_notifier mn; /* mmu notifier for the SVA domain */ + struct list_head dev_data_list; /* List of pdom_dev_data */ }; /* @@ -762,6 +790,10 @@ struct amd_iommu { /* DebugFS Info */ struct dentry *debugfs; #endif + + /* IOPF support */ + struct iopf_queue *iopf_queue; + unsigned char iopfq_name[32]; }; static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) @@ -813,6 +845,7 @@ struct iommu_dev_data { struct device *dev; u16 devid; /* PCI Device ID */ + u32 max_pasids; /* Max supported PASIDs */ u32 flags; /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */ int ats_qdep; u8 ats_enabled :1; /* ATS state */ diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 62317af56d177b332ecca4e05b3ca03e1dbe074e..d7347e19aabb72cccacadcc4ad6d96d2f7c1c5bf 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -419,7 +419,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu) } /* Generic functions to enable/disable certain features of the IOMMU. */ -static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) +void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) { u64 ctrl; @@ -744,9 +744,9 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) * Interrupt handler has processed all pending events and adjusted head * and tail pointer. Reset overflow mask and restart logging again. 
*/ -static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, - u8 cntrl_intr, u8 cntrl_log, - u32 status_run_mask, u32 status_overflow_mask) +void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type, + u8 cntrl_intr, u8 cntrl_log, + u32 status_run_mask, u32 status_overflow_mask) { u32 status; @@ -787,17 +787,6 @@ void amd_iommu_restart_ga_log(struct amd_iommu *iommu) MMIO_STATUS_GALOG_OVERFLOW_MASK); } -/* - * This function restarts ppr logging in case the IOMMU experienced - * PPR log overflow. - */ -void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) -{ - amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN, - CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, - MMIO_STATUS_PPR_OVERFLOW_MASK); -} - /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. @@ -846,8 +835,8 @@ static void __init free_command_buffer(struct amd_iommu *iommu) iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); } -static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, - gfp_t gfp, size_t size) +void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, + size_t size) { int order = get_order(size); void *buf = iommu_alloc_pages(gfp, order); @@ -902,41 +891,6 @@ static void __init free_event_buffer(struct amd_iommu *iommu) iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); } -/* allocates the memory where the IOMMU will log its events to */ -static int __init alloc_ppr_log(struct amd_iommu *iommu) -{ - iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL, PPR_LOG_SIZE); - - return iommu->ppr_log ? 0 : -ENOMEM; -} - -static void iommu_enable_ppr_log(struct amd_iommu *iommu) -{ - u64 entry; - - if (iommu->ppr_log == NULL) - return; - - iommu_feature_enable(iommu, CONTROL_PPR_EN); - - entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; - - memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, - &entry, sizeof(entry)); - - /* set head and tail to zero manually */ - writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); - iommu_feature_enable(iommu, CONTROL_PPRINT_EN); -} - -static void __init free_ppr_log(struct amd_iommu *iommu) -{ - iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE)); -} - static void free_ga_log(struct amd_iommu *iommu) { #ifdef CONFIG_IRQ_REMAP @@ -1685,9 +1639,10 @@ static void __init free_iommu_one(struct amd_iommu *iommu) free_cwwb_sem(iommu); free_command_buffer(iommu); free_event_buffer(iommu); - free_ppr_log(iommu); + amd_iommu_free_ppr_log(iommu); free_ga_log(iommu); iommu_unmap_mmio_space(iommu); + amd_iommu_iopf_uninit(iommu); } static void __init free_iommu_all(void) @@ -2099,9 +2054,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) amd_iommu_max_glx_val = glxval; else amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); + + iommu_enable_gt(iommu); } - if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu)) + if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu)) return -ENOMEM; if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) { @@ -2157,6 +2114,16 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) if (ret) return ret; + /* + * Allocate per IOMMU IOPF queue here so that in attach device path, + * PRI capable device can be added to IOPF queue + */ + if (amd_iommu_gt_ppr_supported()) { + ret = amd_iommu_iopf_init(iommu); + if (ret) + return ret; + } + iommu_device_register(&iommu->iommu, &amd_iommu_ops, 
NULL); return pci_enable_device(iommu->dev); @@ -2832,7 +2799,6 @@ static void early_enable_iommus(void) iommu_disable_irtcachedis(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); - iommu_enable_gt(iommu); iommu_enable_ga(iommu); iommu_enable_xt(iommu); iommu_enable_irtcachedis(iommu); @@ -2842,12 +2808,15 @@ static void early_enable_iommus(void) } } -static void enable_iommus_v2(void) +static void enable_iommus_ppr(void) { struct amd_iommu *iommu; + if (!amd_iommu_gt_ppr_supported()) + return; + for_each_iommu(iommu) - iommu_enable_ppr_log(iommu); + amd_iommu_enable_ppr_log(iommu); } static void enable_iommus_vapic(void) @@ -3190,7 +3159,7 @@ static int amd_iommu_enable_interrupts(void) * PPR and GA log interrupt for all IOMMUs. */ enable_iommus_vapic(); - enable_iommus_v2(); + enable_iommus_ppr(); out: return ret; @@ -3367,7 +3336,7 @@ int amd_iommu_reenable(int mode) return 0; } -int __init amd_iommu_enable_faulting(unsigned int cpu) +int amd_iommu_enable_faulting(unsigned int cpu) { /* We enable MSI later when PCI is initialized */ return 0; @@ -3673,7 +3642,7 @@ __setup("ivrs_ioapic", parse_ivrs_ioapic); __setup("ivrs_hpet", parse_ivrs_hpet); __setup("ivrs_acpihid", parse_ivrs_acpihid); -bool amd_iommu_v2_supported(void) +bool amd_iommu_pasid_supported(void) { /* CPU page table size should match IOMMU guest page table size */ if (cpu_feature_enabled(X86_FEATURE_LA57) && diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index e1d6ad01c641914fff474714edecc552b5d4a640..3965da0376ce9259ff8810d23f398e760cd8d6d0 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -158,7 +158,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova, __npte = set_pgtable_attr(page); /* pte could have been changed somewhere. */ - if (cmpxchg64(pte, __pte, __npte) != __pte) + if (!try_cmpxchg64(pte, &__pte, __npte)) iommu_free_page(page); else if (IOMMU_PTE_PRESENT(__pte)) *updated = true; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 654a75041ea6878390ee60f4c8f4f7bc66652a88..79c18b3978663c8f70f3abef9618bbcd3dc740a5 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -90,6 +90,21 @@ static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom) return (pdom && (pdom->pd_mode == PD_MODE_V2)); } +static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom) +{ + return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY); +} + +/* + * We cannot support PASID w/ existing v1 page table in the same domain + * since it will be nested. However, existing domain w/ v2 page table + * or passthrough mode can be used for PASID. 
+ */ +static inline bool pdom_is_sva_capable(struct protection_domain *pdom) +{ + return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom); +} + static inline int get_acpihid_device_id(struct device *dev, struct acpihid_map_entry **entry) { @@ -180,11 +195,6 @@ static struct amd_iommu *rlookup_amd_iommu(struct device *dev) return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid)); } -static struct protection_domain *to_pdomain(struct iommu_domain *dom) -{ - return container_of(dom, struct protection_domain, domain); -} - static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid) { struct iommu_dev_data *dev_data; @@ -385,7 +395,7 @@ static inline void pdev_disable_cap_ats(struct pci_dev *pdev) } } -int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev) +static inline int pdev_enable_cap_pri(struct pci_dev *pdev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); int ret = -EINVAL; @@ -393,6 +403,9 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev) if (dev_data->pri_enabled) return 0; + if (!dev_data->ats_enabled) + return 0; + if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { /* * First reset the PRI state of the device. @@ -409,7 +422,7 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev) return ret; } -void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev) +static inline void pdev_disable_cap_pri(struct pci_dev *pdev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); @@ -451,15 +464,14 @@ static void pdev_enable_caps(struct pci_dev *pdev) { pdev_enable_cap_ats(pdev); pdev_enable_cap_pasid(pdev); - amd_iommu_pdev_enable_cap_pri(pdev); - + pdev_enable_cap_pri(pdev); } static void pdev_disable_caps(struct pci_dev *pdev) { pdev_disable_cap_ats(pdev); pdev_disable_cap_pasid(pdev); - amd_iommu_pdev_disable_cap_pri(pdev); + pdev_disable_cap_pri(pdev); } /* @@ -819,59 +831,6 @@ static void iommu_poll_events(struct amd_iommu *iommu) writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); } -static void iommu_poll_ppr_log(struct amd_iommu *iommu) -{ - u32 head, tail; - - if (iommu->ppr_log == NULL) - return; - - head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); - - while (head != tail) { - volatile u64 *raw; - u64 entry[2]; - int i; - - raw = (u64 *)(iommu->ppr_log + head); - - /* - * Hardware bug: Interrupt may arrive before the entry is - * written to memory. If this happens we need to wait for the - * entry to arrive. - */ - for (i = 0; i < LOOP_TIMEOUT; ++i) { - if (PPR_REQ_TYPE(raw[0]) != 0) - break; - udelay(1); - } - - /* Avoid memcpy function-call overhead */ - entry[0] = raw[0]; - entry[1] = raw[1]; - - /* - * To detect the hardware errata 733 we need to clear the - * entry back to zero. This issue does not exist on SNP - * enabled system. Also this buffer is not writeable on - * SNP enabled system. 
- */ - if (!amd_iommu_snp_en) - raw[0] = raw[1] = 0UL; - - /* Update head pointer of hardware ring-buffer */ - head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; - writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - - /* TODO: PPR Handler will be added when we add IOPF support */ - - /* Refresh ring-buffer information */ - head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); - } -} - #ifdef CONFIG_IRQ_REMAP static int (*iommu_ga_log_notifier)(u32); @@ -1000,7 +959,7 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data) { amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK, MMIO_STATUS_PPR_OVERFLOW_MASK, - iommu_poll_ppr_log, amd_iommu_restart_ppr_log); + amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log); return IRQ_HANDLED; } @@ -1674,15 +1633,14 @@ void amd_iommu_domain_update(struct protection_domain *domain) amd_iommu_domain_flush_all(domain); } -int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid, - int status, int tag) +int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; struct iommu_cmd cmd; - dev_data = dev_iommu_priv_get(&pdev->dev); - iommu = get_amd_iommu_from_dev(&pdev->dev); + dev_data = dev_iommu_priv_get(dev); + iommu = get_amd_iommu_from_dev(dev); build_complete_ppr(&cmd, dev_data->devid, pasid, status, tag, dev_data->pri_tlp); @@ -2012,6 +1970,73 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid) amd_iommu_apply_erratum_63(iommu, devid); } +/* Update and flush DTE for the given device */ +void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set) +{ + struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); + + if (set) + set_dte_entry(iommu, dev_data); + else + clear_dte_entry(iommu, dev_data->devid); + + clone_aliases(iommu, dev_data->dev); + device_flush_dte(dev_data); + iommu_completion_wait(iommu); +} + +/* + * If domain is SVA capable then initialize GCR3 table. Also if domain is + * in v2 page table mode then update GCR3[0]. + */ +static int init_gcr3_table(struct iommu_dev_data *dev_data, + struct protection_domain *pdom) +{ + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + int max_pasids = dev_data->max_pasids; + int ret = 0; + + /* + * If domain is in pt mode then setup GCR3 table only if device + * is PASID capable + */ + if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data)) + return ret; + + /* + * By default, setup GCR3 table to support MAX PASIDs + * supported by the device/IOMMU. + */ + ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, + max_pasids > 0 ? 
max_pasids : 1); + if (ret) + return ret; + + /* Setup GCR3[0] only if domain is setup with v2 page table mode */ + if (!pdom_is_v2_pgtbl_mode(pdom)) + return ret; + + ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); + if (ret) + free_gcr3_table(&dev_data->gcr3_info); + + return ret; +} + +static void destroy_gcr3_table(struct iommu_dev_data *dev_data, + struct protection_domain *pdom) +{ + struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; + + if (pdom_is_v2_pgtbl_mode(pdom)) + update_gcr3(dev_data, 0, 0, false); + + if (gcr3_info->gcr3_tbl == NULL) + return; + + free_gcr3_table(gcr3_info); +} + static int do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { @@ -2030,27 +2055,13 @@ static int do_attach(struct iommu_dev_data *dev_data, domain->dev_iommu[iommu->index] += 1; domain->dev_cnt += 1; - /* Init GCR3 table and update device table */ - if (domain->pd_mode == PD_MODE_V2) { - /* By default, setup GCR3 table to support single PASID */ - ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1); + /* Setup GCR3 table */ + if (pdom_is_sva_capable(domain)) { + ret = init_gcr3_table(dev_data, domain); if (ret) return ret; - - ret = update_gcr3(dev_data, 0, - iommu_virt_to_phys(domain->iop.pgd), true); - if (ret) { - free_gcr3_table(&dev_data->gcr3_info); - return ret; - } } - /* Update device table */ - set_dte_entry(iommu, dev_data); - clone_aliases(iommu, dev_data->dev); - - device_flush_dte(dev_data); - return ret; } @@ -2059,23 +2070,19 @@ static void do_detach(struct iommu_dev_data *dev_data) struct protection_domain *domain = dev_data->domain; struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); + /* Clear DTE and flush the entry */ + amd_iommu_dev_update_dte(dev_data, false); + + /* Flush IOTLB and wait for the flushes to finish */ + amd_iommu_domain_flush_all(domain); + /* Clear GCR3 table */ - if (domain->pd_mode == PD_MODE_V2) { - update_gcr3(dev_data, 0, 0, false); - free_gcr3_table(&dev_data->gcr3_info); - } + if (pdom_is_sva_capable(domain)) + destroy_gcr3_table(dev_data, domain); /* Update data structures */ dev_data->domain = NULL; list_del(&dev_data->list); - clear_dte_entry(iommu, dev_data->devid); - clone_aliases(iommu, dev_data->dev); - - /* Flush the DTE entry */ - device_flush_dte(dev_data); - - /* Flush IOTLB and wait for the flushes to finish */ - amd_iommu_domain_flush_all(domain); /* decrease reference counters - needs to happen after the flushes */ domain->dev_iommu[iommu->index] -= 1; @@ -2104,9 +2111,6 @@ static int attach_device(struct device *dev, goto out; } - if (dev_is_pci(dev)) - pdev_enable_caps(to_pci_dev(dev)); - ret = do_attach(dev_data, domain); out: @@ -2122,12 +2126,11 @@ static int attach_device(struct device *dev, */ static void detach_device(struct device *dev) { - struct protection_domain *domain; - struct iommu_dev_data *dev_data; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + struct protection_domain *domain = dev_data->domain; + struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); unsigned long flags; - - dev_data = dev_iommu_priv_get(dev); - domain = dev_data->domain; + bool ppr = dev_data->ppr; spin_lock_irqsave(&domain->lock, flags); @@ -2142,21 +2145,34 @@ static void detach_device(struct device *dev) if (WARN_ON(!dev_data->domain)) goto out; - do_detach(dev_data); + if (ppr) { + iopf_queue_flush_dev(dev); - if (dev_is_pci(dev)) - pdev_disable_caps(to_pci_dev(dev)); + /* Updated here so that it gets reflected in DTE */ + dev_data->ppr = false; + } 
+ + do_detach(dev_data); out: spin_unlock(&dev_data->lock); spin_unlock_irqrestore(&domain->lock, flags); + + /* Remove IOPF handler */ + if (ppr) + amd_iommu_iopf_remove_device(iommu, dev_data); + + if (dev_is_pci(dev)) + pdev_disable_caps(to_pci_dev(dev)); + } static struct iommu_device *amd_iommu_probe_device(struct device *dev) { struct iommu_device *iommu_dev; struct amd_iommu *iommu; + struct iommu_dev_data *dev_data; int ret; if (!check_device(dev)) @@ -2183,18 +2199,22 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) iommu_dev = &iommu->iommu; } + /* + * If IOMMU and device supports PASID then it will contain max + * supported PASIDs, else it will be zero. + */ + dev_data = dev_iommu_priv_get(dev); + if (amd_iommu_pasid_supported() && dev_is_pci(dev) && + pdev_pasid_supported(dev_data)) { + dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, + pci_max_pasids(to_pci_dev(dev))); + } + iommu_completion_wait(iommu); return iommu_dev; } -static void amd_iommu_probe_finalize(struct device *dev) -{ - /* Domains are initialized for this device - have a look what we ended up with */ - set_dma_ops(dev, NULL); - iommu_setup_dma_ops(dev, 0, U64_MAX); -} - static void amd_iommu_release_device(struct device *dev) { struct amd_iommu *iommu; @@ -2246,7 +2266,7 @@ static void cleanup_domain(struct protection_domain *domain) WARN_ON(domain->dev_cnt != 0); } -static void protection_domain_free(struct protection_domain *domain) +void protection_domain_free(struct protection_domain *domain) { if (!domain) return; @@ -2289,7 +2309,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom) return 0; } -static struct protection_domain *protection_domain_alloc(unsigned int type) +struct protection_domain *protection_domain_alloc(unsigned int type) { struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; @@ -2306,11 +2326,13 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) spin_lock_init(&domain->lock); INIT_LIST_HEAD(&domain->dev_list); + INIT_LIST_HEAD(&domain->dev_data_list); domain->nid = NUMA_NO_NODE; switch (type) { /* No need to allocate io pgtable ops in passthrough mode */ case IOMMU_DOMAIN_IDENTITY: + case IOMMU_DOMAIN_SVA: return domain; case IOMMU_DOMAIN_DMA: pgtable = amd_iommu_pgtable; @@ -2443,7 +2465,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags, return do_iommu_domain_alloc(type, dev, flags); } -static void amd_iommu_domain_free(struct iommu_domain *dom) +void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; unsigned long flags; @@ -2468,6 +2490,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct protection_domain *domain = to_pdomain(dom); struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); + struct pci_dev *pdev; int ret; /* @@ -2500,7 +2523,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } #endif - iommu_completion_wait(iommu); + pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + if (pdev && pdom_is_sva_capable(domain)) { + pdev_enable_caps(pdev); + + /* + * Device can continue to function even if IOPF + * enablement failed. Hence in error path just + * disable device PRI support. 
+ */ + if (amd_iommu_iopf_add_device(iommu, dev_data)) + pdev_disable_cap_pri(pdev); + } else if (pdev) { + pdev_enable_cap_ats(pdev); + } + + /* Update device table */ + amd_iommu_dev_update_dte(dev_data, true); return ret; } @@ -2777,6 +2816,10 @@ static int amd_iommu_def_domain_type(struct device *dev) if (!dev_data) return 0; + /* Always use DMA domain for untrusted device */ + if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) + return IOMMU_DOMAIN_DMA; + /* * Do not identity map IOMMUv2 capable devices when: * - memory encryption is active, because some of those devices @@ -2804,18 +2847,54 @@ static const struct iommu_dirty_ops amd_dirty_ops = { .read_and_clear_dirty = amd_iommu_read_and_clear_dirty, }; +static int amd_iommu_dev_enable_feature(struct device *dev, + enum iommu_dev_features feat) +{ + int ret = 0; + + switch (feat) { + case IOMMU_DEV_FEAT_IOPF: + case IOMMU_DEV_FEAT_SVA: + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int amd_iommu_dev_disable_feature(struct device *dev, + enum iommu_dev_features feat) +{ + int ret = 0; + + switch (feat) { + case IOMMU_DEV_FEAT_IOPF: + case IOMMU_DEV_FEAT_SVA: + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, .domain_alloc_user = amd_iommu_domain_alloc_user, + .domain_alloc_sva = amd_iommu_domain_alloc_sva, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, - .probe_finalize = amd_iommu_probe_finalize, .device_group = amd_iommu_device_group, .get_resv_regions = amd_iommu_get_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, .def_domain_type = amd_iommu_def_domain_type, + .dev_enable_feat = amd_iommu_dev_enable_feature, + .dev_disable_feat = amd_iommu_dev_disable_feature, + .remove_dev_pasid = amd_iommu_remove_dev_pasid, + .page_response = amd_iommu_page_response, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = amd_iommu_attach_device, .map_pages = amd_iommu_map_pages, diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c new file mode 100644 index 0000000000000000000000000000000000000000..a68215f2b3e1d6a4a422bffa9d2081ee3a3bed20 --- /dev/null +++ b/drivers/iommu/amd/pasid.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Advanced Micro Devices, Inc. 
+ */ + +#define pr_fmt(fmt) "AMD-Vi: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) + +#include +#include + +#include "amd_iommu.h" + +static inline bool is_pasid_enabled(struct iommu_dev_data *dev_data) +{ + if (dev_data->pasid_enabled && dev_data->max_pasids && + dev_data->gcr3_info.gcr3_tbl != NULL) + return true; + + return false; +} + +static inline bool is_pasid_valid(struct iommu_dev_data *dev_data, + ioasid_t pasid) +{ + if (pasid > 0 && pasid < dev_data->max_pasids) + return true; + + return false; +} + +static void remove_dev_pasid(struct pdom_dev_data *pdom_dev_data) +{ + /* Update GCR3 table and flush IOTLB */ + amd_iommu_clear_gcr3(pdom_dev_data->dev_data, pdom_dev_data->pasid); + + list_del(&pdom_dev_data->list); + kfree(pdom_dev_data); +} + +/* Clear PASID from device GCR3 table and remove pdom_dev_data from list */ +static void remove_pdom_dev_pasid(struct protection_domain *pdom, + struct device *dev, ioasid_t pasid) +{ + struct pdom_dev_data *pdom_dev_data; + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + + lockdep_assert_held(&pdom->lock); + + for_each_pdom_dev_data(pdom_dev_data, pdom) { + if (pdom_dev_data->dev_data == dev_data && + pdom_dev_data->pasid == pasid) { + remove_dev_pasid(pdom_dev_data); + break; + } + } +} + +static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct pdom_dev_data *pdom_dev_data; + struct protection_domain *sva_pdom; + unsigned long flags; + + sva_pdom = container_of(mn, struct protection_domain, mn); + + spin_lock_irqsave(&sva_pdom->lock, flags); + + for_each_pdom_dev_data(pdom_dev_data, sva_pdom) { + amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data, + pdom_dev_data->pasid, + start, end - start); + } + + spin_unlock_irqrestore(&sva_pdom->lock, flags); +} + +static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct pdom_dev_data *pdom_dev_data, *next; + struct protection_domain *sva_pdom; + unsigned long flags; + + sva_pdom = container_of(mn, struct protection_domain, mn); + + spin_lock_irqsave(&sva_pdom->lock, flags); + + /* Assume dev_data_list contains same PASID with different devices */ + for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom) + remove_dev_pasid(pdom_dev_data); + + spin_unlock_irqrestore(&sva_pdom->lock, flags); +} + +static const struct mmu_notifier_ops sva_mn = { + .arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs, + .release = sva_mn_release, +}; + +int iommu_sva_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ + struct pdom_dev_data *pdom_dev_data; + struct protection_domain *sva_pdom = to_pdomain(domain); + struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); + unsigned long flags; + int ret = -EINVAL; + + /* PASID zero is used for requests from the I/O device without PASID */ + if (!is_pasid_valid(dev_data, pasid)) + return ret; + + /* Make sure PASID is enabled */ + if (!is_pasid_enabled(dev_data)) + return ret; + + /* Add PASID to protection domain pasid list */ + pdom_dev_data = kzalloc(sizeof(*pdom_dev_data), GFP_KERNEL); + if (pdom_dev_data == NULL) + return ret; + + pdom_dev_data->pasid = pasid; + pdom_dev_data->dev_data = dev_data; + + spin_lock_irqsave(&sva_pdom->lock, flags); + + /* Setup GCR3 table */ + ret = amd_iommu_set_gcr3(dev_data, pasid, + iommu_virt_to_phys(domain->mm->pgd)); + if (ret) { + kfree(pdom_dev_data); + goto out_unlock; + } + + list_add(&pdom_dev_data->list, &sva_pdom->dev_data_list); + 
+out_unlock: + spin_unlock_irqrestore(&sva_pdom->lock, flags); + return ret; +} + +void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) +{ + struct protection_domain *sva_pdom; + unsigned long flags; + + if (!is_pasid_valid(dev_iommu_priv_get(dev), pasid)) + return; + + sva_pdom = to_pdomain(domain); + + spin_lock_irqsave(&sva_pdom->lock, flags); + + /* Remove PASID from dev_data_list */ + remove_pdom_dev_pasid(sva_pdom, dev, pasid); + + spin_unlock_irqrestore(&sva_pdom->lock, flags); +} + +static void iommu_sva_domain_free(struct iommu_domain *domain) +{ + struct protection_domain *sva_pdom = to_pdomain(domain); + + if (sva_pdom->mn.ops) + mmu_notifier_unregister(&sva_pdom->mn, domain->mm); + + amd_iommu_domain_free(domain); +} + +static const struct iommu_domain_ops amd_sva_domain_ops = { + .set_dev_pasid = iommu_sva_set_dev_pasid, + .free = iommu_sva_domain_free +}; + +struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev, + struct mm_struct *mm) +{ + struct protection_domain *pdom; + int ret; + + pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA); + if (!pdom) + return ERR_PTR(-ENOMEM); + + pdom->domain.ops = &amd_sva_domain_ops; + pdom->mn.ops = &sva_mn; + + ret = mmu_notifier_register(&pdom->mn, mm); + if (ret) { + protection_domain_free(pdom); + return ERR_PTR(ret); + } + + return &pdom->domain; +} diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c new file mode 100644 index 0000000000000000000000000000000000000000..a504f02dd904072736840718a1c24445cc398ae6 --- /dev/null +++ b/drivers/iommu/amd/ppr.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. + */ + +#define pr_fmt(fmt) "AMD-Vi: " fmt +#define dev_fmt(fmt) pr_fmt(fmt) + +#include +#include +#include + +#include + +#include "amd_iommu.h" +#include "amd_iommu_types.h" + +int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu) +{ + iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, + PPR_LOG_SIZE); + return iommu->ppr_log ? 0 : -ENOMEM; +} + +void amd_iommu_enable_ppr_log(struct amd_iommu *iommu) +{ + u64 entry; + + if (iommu->ppr_log == NULL) + return; + + iommu_feature_enable(iommu, CONTROL_PPR_EN); + + entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; + + memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, + &entry, sizeof(entry)); + + /* set head and tail to zero manually */ + writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_PPRINT_EN); + iommu_feature_enable(iommu, CONTROL_PPRLOG_EN); +} + +void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu) +{ + free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); +} + +/* + * This function restarts ppr logging in case the IOMMU experienced + * PPR log overflow. 
+ */ +void amd_iommu_restart_ppr_log(struct amd_iommu *iommu) +{ + amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN, + CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK, + MMIO_STATUS_PPR_OVERFLOW_MASK); +} + +static inline u32 ppr_flag_to_fault_perm(u16 flag) +{ + int perm = 0; + + if (flag & PPR_FLAG_READ) + perm |= IOMMU_FAULT_PERM_READ; + if (flag & PPR_FLAG_WRITE) + perm |= IOMMU_FAULT_PERM_WRITE; + if (flag & PPR_FLAG_EXEC) + perm |= IOMMU_FAULT_PERM_EXEC; + if (!(flag & PPR_FLAG_US)) + perm |= IOMMU_FAULT_PERM_PRIV; + + return perm; +} + +static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw) +{ + struct device *dev = iommu->iommu.dev; + u16 devid = PPR_DEVID(raw[0]); + + if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) { + dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x " + "pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0])); + return false; + } + + if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) { + dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x " + "pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n", + iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0])); + return false; + } + + return true; +} + +static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw) +{ + struct iommu_dev_data *dev_data; + struct iopf_fault event; + struct pci_dev *pdev; + u16 devid = PPR_DEVID(raw[0]); + + if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { + pr_info_ratelimited("Unknown PPR request received\n"); + return; + } + + pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, + PCI_BUS_NUM(devid), devid & 0xff); + if (!pdev) + return; + + if (!ppr_is_valid(iommu, raw)) + goto out; + + memset(&event, 0, sizeof(struct iopf_fault)); + + event.fault.type = IOMMU_FAULT_PAGE_REQ; + event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0])); + event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK); + event.fault.prm.pasid = PPR_PASID(raw[0]); + event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF; + + /* + * PASID zero is used for requests from the I/O device without + * a PASID + */ + dev_data = dev_iommu_priv_get(&pdev->dev); + if (event.fault.prm.pasid == 0 || + event.fault.prm.pasid >= dev_data->max_pasids) { + pr_info_ratelimited("Invalid PASID : 0x%x, device : 0x%x\n", + event.fault.prm.pasid, pdev->dev.id); + goto out; + } + + event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + if (PPR_TAG(raw[0]) & 0x200) + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + + /* Submit event */ + iommu_report_device_fault(&pdev->dev, &event); + + return; + +out: + /* Nobody cared, abort */ + amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]), + IOMMU_PAGE_RESP_FAILURE, + PPR_TAG(raw[0]) & 0x1FF); +} + +void amd_iommu_poll_ppr_log(struct amd_iommu *iommu) +{ + u32 head, tail; + + if (iommu->ppr_log == NULL) + return; + + head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + + while (head != tail) { + volatile u64 *raw; + u64 entry[2]; + int i; + + raw = (u64 *)(iommu->ppr_log + head); + + /* + * Hardware bug: Interrupt may arrive before the entry is + * written to memory. If this happens we need to wait for the + * entry to arrive. 
+ */ + for (i = 0; i < LOOP_TIMEOUT; ++i) { + if (PPR_REQ_TYPE(raw[0]) != 0) + break; + udelay(1); + } + + /* Avoid memcpy function-call overhead */ + entry[0] = raw[0]; + entry[1] = raw[1]; + + /* + * To detect the hardware errata 733 we need to clear the + * entry back to zero. This issue does not exist on SNP + * enabled system. Also this buffer is not writeable on + * SNP enabled system. + */ + if (!amd_iommu_snp_en) + raw[0] = raw[1] = 0UL; + + /* Update head pointer of hardware ring-buffer */ + head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; + writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + + /* Handle PPR entry */ + iommu_call_iopf_notifier(iommu, entry); + } +} + +/************************************************************** + * + * IOPF handling stuff + */ + +/* Setup per-IOMMU IOPF queue if not exist. */ +int amd_iommu_iopf_init(struct amd_iommu *iommu) +{ + int ret = 0; + + if (iommu->iopf_queue) + return ret; + + snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x", + PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid)); + + iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name); + if (!iommu->iopf_queue) + ret = -ENOMEM; + + return ret; +} + +/* Destroy per-IOMMU IOPF queue if no longer needed. */ +void amd_iommu_iopf_uninit(struct amd_iommu *iommu) +{ + iopf_queue_free(iommu->iopf_queue); + iommu->iopf_queue = NULL; +} + +void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *resp) +{ + amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid); +} + +int amd_iommu_iopf_add_device(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data) +{ + int ret = 0; + + if (!dev_data->pri_enabled) + return ret; + + if (!iommu->iopf_queue) + return -EINVAL; + + ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev); + if (ret) + return ret; + + dev_data->ppr = true; + return 0; +} + +/* Its assumed that caller has verified that device was added to iopf queue */ +void amd_iommu_iopf_remove_device(struct amd_iommu *iommu, + struct iommu_dev_data *dev_data) +{ + iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev); + dev_data->ppr = false; +} diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile index 54feb1ecccad89924356438fb1bb2375a6957f15..355173d1441d2f86044a181a3bf045dffc707480 100644 --- a/drivers/iommu/arm/arm-smmu-v3/Makefile +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o -arm_smmu_v3-objs-y += arm-smmu-v3.o -arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o -arm_smmu_v3-objs := $(arm_smmu_v3-objs-y) +arm_smmu_v3-y := arm-smmu-v3.o +arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o + +obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 41b44baef15e807f7be5d3f90510425635125a8a..a7c36654dee5a504835faa95be65d8be102f8675 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -8,199 +8,112 @@ #include #include #include +#include #include "arm-smmu-v3.h" #include "../../io-pgtable-arm.h" -struct arm_smmu_mmu_notifier { - struct mmu_notifier mn; - struct arm_smmu_ctx_desc *cd; - bool cleared; - refcount_t refs; - struct list_head list; - struct arm_smmu_domain *domain; -}; - -#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn) - -struct 
arm_smmu_bond { - struct mm_struct *mm; - struct arm_smmu_mmu_notifier *smmu_mn; - struct list_head list; -}; - -#define sva_to_bond(handle) \ - container_of(handle, struct arm_smmu_bond, sva) - static DEFINE_MUTEX(sva_lock); -/* - * Write the CD to the CD tables for all masters that this domain is attached - * to. Note that this is only used to update existing CD entries in the target - * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail. - */ -static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain, - int ssid, - struct arm_smmu_ctx_desc *cd) +static void __maybe_unused +arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain) { - struct arm_smmu_master *master; + struct arm_smmu_master_domain *master_domain; + struct arm_smmu_cd target_cd; unsigned long flags; spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_for_each_entry(master, &smmu_domain->devices, domain_head) { - arm_smmu_write_ctx_desc(master, ssid, cd); + list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) { + struct arm_smmu_master *master = master_domain->master; + struct arm_smmu_cd *cdptr; + + cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid); + if (WARN_ON(!cdptr)) + continue; + + arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); + arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr, + &target_cd); } spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); } -/* - * Check if the CPU ASID is available on the SMMU side. If a private context - * descriptor is using it, try to replace it. - */ -static struct arm_smmu_ctx_desc * -arm_smmu_share_asid(struct mm_struct *mm, u16 asid) +static u64 page_size_to_cd(void) { - int ret; - u32 new_asid; - struct arm_smmu_ctx_desc *cd; - struct arm_smmu_device *smmu; - struct arm_smmu_domain *smmu_domain; - - cd = xa_load(&arm_smmu_asid_xa, asid); - if (!cd) - return NULL; - - if (cd->mm) { - if (WARN_ON(cd->mm != mm)) - return ERR_PTR(-EINVAL); - /* All devices bound to this mm use the same cd struct. */ - refcount_inc(&cd->refs); - return cd; - } - - smmu_domain = container_of(cd, struct arm_smmu_domain, cd); - smmu = smmu_domain->smmu; - - ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd, - XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); - if (ret) - return ERR_PTR(-ENOSPC); - /* - * Race with unmap: TLB invalidations will start targeting the new ASID, - * which isn't assigned yet. We'll do an invalidate-all on the old ASID - * later, so it doesn't matter. - */ - cd->asid = new_asid; - /* - * Update ASID and invalidate CD in all associated masters. There will - * be some overlap between use of both ASIDs, until we invalidate the - * TLB. 
- */ - arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd); - - /* Invalidate TLB entries previously associated with that context */ - arm_smmu_tlb_inv_asid(smmu, asid); - - xa_erase(&arm_smmu_asid_xa, asid); - return NULL; + static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K || + PAGE_SIZE == SZ_64K); + if (PAGE_SIZE == SZ_64K) + return ARM_LPAE_TCR_TG0_64K; + if (PAGE_SIZE == SZ_16K) + return ARM_LPAE_TCR_TG0_16K; + return ARM_LPAE_TCR_TG0_4K; } -static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) +VISIBLE_IF_KUNIT +void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, struct mm_struct *mm, + u16 asid) { - u16 asid; - int err = 0; - u64 tcr, par, reg; - struct arm_smmu_ctx_desc *cd; - struct arm_smmu_ctx_desc *ret = NULL; - - /* Don't free the mm until we release the ASID */ - mmgrab(mm); - - asid = arm64_mm_context_get(mm); - if (!asid) { - err = -ESRCH; - goto out_drop_mm; - } - - cd = kzalloc(sizeof(*cd), GFP_KERNEL); - if (!cd) { - err = -ENOMEM; - goto out_put_context; - } + u64 par; + + memset(target, 0, sizeof(*target)); + + par = cpuid_feature_extract_unsigned_field( + read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1), + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + + target->data[0] = cpu_to_le64( + CTXDESC_CD_0_TCR_EPD1 | +#ifdef __BIG_ENDIAN + CTXDESC_CD_0_ENDI | +#endif + CTXDESC_CD_0_V | + FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) | + CTXDESC_CD_0_AA64 | + (master->stall_enabled ? CTXDESC_CD_0_S : 0) | + CTXDESC_CD_0_R | + CTXDESC_CD_0_A | + CTXDESC_CD_0_ASET | + FIELD_PREP(CTXDESC_CD_0_ASID, asid)); - refcount_set(&cd->refs, 1); - - mutex_lock(&arm_smmu_asid_lock); - ret = arm_smmu_share_asid(mm, asid); - if (ret) { - mutex_unlock(&arm_smmu_asid_lock); - goto out_free_cd; - } - - err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL); - mutex_unlock(&arm_smmu_asid_lock); - - if (err) - goto out_free_asid; - - tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) | - FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) | - FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) | - FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) | - CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64; - - switch (PAGE_SIZE) { - case SZ_4K: - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K); - break; - case SZ_16K: - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K); - break; - case SZ_64K: - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K); - break; - default: - WARN_ON(1); - err = -EINVAL; - goto out_free_asid; + /* + * If no MM is passed then this creates a SVA entry that faults + * everything. arm_smmu_write_cd_entry() can hitlessly go between these + * two entries types since TTB0 is ignored by HW when EPD0 is set. + */ + if (mm) { + target->data[0] |= cpu_to_le64( + FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, + 64ULL - vabits_actual) | + FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) | + FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, + ARM_LPAE_TCR_RGN_WBWA) | + FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, + ARM_LPAE_TCR_RGN_WBWA) | + FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS)); + + target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) & + CTXDESC_CD_1_TTB0_MASK); + } else { + target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0); + + /* + * Disable stall and immediately generate an abort if stall + * disable is permitted. This speeds up cleanup for an unclean + * exit if the device is still doing a lot of DMA. 
+ */ + if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) + target->data[0] &= + cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R)); } - reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT); - tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par); - - cd->ttbr = virt_to_phys(mm->pgd); - cd->tcr = tcr; /* * MAIR value is pretty much constant and global, so we can just get it * from the current CPU register */ - cd->mair = read_sysreg(mair_el1); - cd->asid = asid; - cd->mm = mm; - - return cd; - -out_free_asid: - arm_smmu_free_asid(cd); -out_free_cd: - kfree(cd); -out_put_context: - arm64_mm_context_put(mm); -out_drop_mm: - mmdrop(mm); - return err < 0 ? ERR_PTR(err) : ret; -} - -static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) -{ - if (arm_smmu_free_asid(cd)) { - /* Unpin ASID */ - arm64_mm_context_put(cd->mm); - mmdrop(cd->mm); - kfree(cd); - } + target->data[3] = cpu_to_le64(read_sysreg(mair_el1)); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd); /* * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this @@ -216,8 +129,8 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, unsigned long start, unsigned long end) { - struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); - struct arm_smmu_domain *smmu_domain = smmu_mn->domain; + struct arm_smmu_domain *smmu_domain = + container_of(mn, struct arm_smmu_domain, mmu_notifier); size_t size; /* @@ -234,49 +147,50 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, size = 0; } - if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) { - if (!size) - arm_smmu_tlb_inv_asid(smmu_domain->smmu, - smmu_mn->cd->asid); - else - arm_smmu_tlb_inv_range_asid(start, size, - smmu_mn->cd->asid, - PAGE_SIZE, false, - smmu_domain); - } + if (!size) + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); + else + arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid, + PAGE_SIZE, false, smmu_domain); - arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start, - size); + arm_smmu_atc_inv_domain(smmu_domain, start, size); } static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) { - struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); - struct arm_smmu_domain *smmu_domain = smmu_mn->domain; - - mutex_lock(&sva_lock); - if (smmu_mn->cleared) { - mutex_unlock(&sva_lock); - return; - } + struct arm_smmu_domain *smmu_domain = + container_of(mn, struct arm_smmu_domain, mmu_notifier); + struct arm_smmu_master_domain *master_domain; + unsigned long flags; /* * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events, * but disable translation. 
*/ - arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm), - &quiet_cd); - - arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid); - arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0); + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + struct arm_smmu_master *master = master_domain->master; + struct arm_smmu_cd target; + struct arm_smmu_cd *cdptr; + + cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid); + if (WARN_ON(!cdptr)) + continue; + arm_smmu_make_sva_cd(&target, master, NULL, + smmu_domain->cd.asid); + arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr, + &target); + } + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - smmu_mn->cleared = true; - mutex_unlock(&sva_lock); + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0); } static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn) { - kfree(mn_to_smmu(mn)); + kfree(container_of(mn, struct arm_smmu_domain, mmu_notifier)); } static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = { @@ -285,121 +199,6 @@ static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = { .free_notifier = arm_smmu_mmu_notifier_free, }; -/* Allocate or get existing MMU notifier for this {domain, mm} pair */ -static struct arm_smmu_mmu_notifier * -arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain, - struct mm_struct *mm) -{ - int ret; - struct arm_smmu_ctx_desc *cd; - struct arm_smmu_mmu_notifier *smmu_mn; - - list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) { - if (smmu_mn->mn.mm == mm) { - refcount_inc(&smmu_mn->refs); - return smmu_mn; - } - } - - cd = arm_smmu_alloc_shared_cd(mm); - if (IS_ERR(cd)) - return ERR_CAST(cd); - - smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL); - if (!smmu_mn) { - ret = -ENOMEM; - goto err_free_cd; - } - - refcount_set(&smmu_mn->refs, 1); - smmu_mn->cd = cd; - smmu_mn->domain = smmu_domain; - smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops; - - ret = mmu_notifier_register(&smmu_mn->mn, mm); - if (ret) { - kfree(smmu_mn); - goto err_free_cd; - } - - list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers); - return smmu_mn; - -err_free_cd: - arm_smmu_free_shared_cd(cd); - return ERR_PTR(ret); -} - -static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn) -{ - struct mm_struct *mm = smmu_mn->mn.mm; - struct arm_smmu_ctx_desc *cd = smmu_mn->cd; - struct arm_smmu_domain *smmu_domain = smmu_mn->domain; - - if (!refcount_dec_and_test(&smmu_mn->refs)) - return; - - list_del(&smmu_mn->list); - - /* - * If we went through clear(), we've already invalidated, and no - * new TLB entry can have been formed. 
- */ - if (!smmu_mn->cleared) { - arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid); - arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, - 0); - } - - /* Frees smmu_mn */ - mmu_notifier_put(&smmu_mn->mn); - arm_smmu_free_shared_cd(cd); -} - -static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid, - struct mm_struct *mm) -{ - int ret; - struct arm_smmu_bond *bond; - struct arm_smmu_master *master = dev_iommu_priv_get(dev); - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct arm_smmu_domain *smmu_domain; - - if (!(domain->type & __IOMMU_DOMAIN_PAGING)) - return -ENODEV; - smmu_domain = to_smmu_domain(domain); - if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) - return -ENODEV; - - if (!master || !master->sva_enabled) - return -ENODEV; - - bond = kzalloc(sizeof(*bond), GFP_KERNEL); - if (!bond) - return -ENOMEM; - - bond->mm = mm; - - bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); - if (IS_ERR(bond->smmu_mn)) { - ret = PTR_ERR(bond->smmu_mn); - goto err_free_bond; - } - - ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd); - if (ret) - goto err_put_notifier; - - list_add(&bond->list, &master->bonds); - return 0; - -err_put_notifier: - arm_smmu_mmu_notifier_put(bond->smmu_mn); -err_free_bond: - kfree(bond); - return ret; -} - bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { unsigned long reg, fld; @@ -516,11 +315,6 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master) int arm_smmu_master_disable_sva(struct arm_smmu_master *master) { mutex_lock(&sva_lock); - if (!list_empty(&master->bonds)) { - dev_err(master->dev, "cannot disable SVA, device is bound\n"); - mutex_unlock(&sva_lock); - return -EBUSY; - } arm_smmu_master_sva_disable_iopf(master); master->sva_enabled = false; mutex_unlock(&sva_lock); @@ -537,51 +331,51 @@ void arm_smmu_sva_notifier_synchronize(void) mmu_notifier_synchronize(); } -void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t id) -{ - struct mm_struct *mm = domain->mm; - struct arm_smmu_bond *bond = NULL, *t; - struct arm_smmu_master *master = dev_iommu_priv_get(dev); - - mutex_lock(&sva_lock); - - arm_smmu_write_ctx_desc(master, id, NULL); - - list_for_each_entry(t, &master->bonds, list) { - if (t->mm == mm) { - bond = t; - break; - } - } - - if (!WARN_ON(!bond)) { - list_del(&bond->list); - arm_smmu_mmu_notifier_put(bond->smmu_mn); - kfree(bond); - } - mutex_unlock(&sva_lock); -} - static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t id) { - int ret = 0; - struct mm_struct *mm = domain->mm; + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_cd target; + int ret; - if (mm_get_enqcmd_pasid(mm) != id) + /* Prevent arm_smmu_mm_release from being called while we are attaching */ + if (!mmget_not_zero(domain->mm)) return -EINVAL; - mutex_lock(&sva_lock); - ret = __arm_smmu_sva_bind(dev, id, mm); - mutex_unlock(&sva_lock); + /* + * This does not need the arm_smmu_asid_lock because SVA domains never + * get reassigned + */ + arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid); + ret = arm_smmu_set_pasid(master, smmu_domain, id, &target); + mmput(domain->mm); return ret; } static void arm_smmu_sva_domain_free(struct iommu_domain *domain) { - kfree(domain); + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + /* + * Ensure the ASID is empty in the iommu cache before allowing reuse. 
+ */ + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); + + /* + * Notice that the arm_smmu_mm_arch_invalidate_secondary_tlbs op can + * still be called/running at this point. We allow the ASID to be + * reused, and if there is a race then it just suffers harmless + * unnecessary invalidation. + */ + xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); + + /* + * Actual free is deferred to the SRCU callback + * arm_smmu_mmu_notifier_free() + */ + mmu_notifier_put(&smmu_domain->mmu_notifier); } static const struct iommu_domain_ops arm_smmu_sva_domain_ops = { @@ -589,14 +383,38 @@ static const struct iommu_domain_ops arm_smmu_sva_domain_ops = { .free = arm_smmu_sva_domain_free }; -struct iommu_domain *arm_smmu_sva_domain_alloc(void) +struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm) { - struct iommu_domain *domain; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_domain *smmu_domain; + u32 asid; + int ret; + + smmu_domain = arm_smmu_domain_alloc(); + if (IS_ERR(smmu_domain)) + return ERR_CAST(smmu_domain); + smmu_domain->domain.type = IOMMU_DOMAIN_SVA; + smmu_domain->domain.ops = &arm_smmu_sva_domain_ops; + smmu_domain->smmu = smmu; + + ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, + XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); + if (ret) + goto err_free; + + smmu_domain->cd.asid = asid; + smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops; + ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm); + if (ret) + goto err_asid; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - domain->ops = &arm_smmu_sva_domain_ops; + return &smmu_domain->domain; - return domain; +err_asid: + xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); +err_free: + kfree(smmu_domain); + return ERR_PTR(ret); } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c new file mode 100644 index 0000000000000000000000000000000000000000..cceb737a700126455425c7aab8b27fe88adf78a3 --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2024 Google LLC. 
+ */ +#include +#include + +#include "arm-smmu-v3.h" + +struct arm_smmu_test_writer { + struct arm_smmu_entry_writer writer; + struct kunit *test; + const __le64 *init_entry; + const __le64 *target_entry; + __le64 *entry; + + bool invalid_entry_written; + unsigned int num_syncs; +}; + +#define NUM_ENTRY_QWORDS 8 +#define NUM_EXPECTED_SYNCS(x) x + +static struct arm_smmu_ste bypass_ste; +static struct arm_smmu_ste abort_ste; +static struct arm_smmu_device smmu = { + .features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR +}; +static struct mm_struct sva_mm = { + .pgd = (void *)0xdaedbeefdeadbeefULL, +}; + +static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry, + const __le64 *used_bits, + const __le64 *target, + unsigned int length) +{ + bool differs = false; + unsigned int i; + + for (i = 0; i < length; i++) { + if ((entry[i] & used_bits[i]) != target[i]) + differs = true; + } + return differs; +} + +static void +arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer) +{ + struct arm_smmu_test_writer *test_writer = + container_of(writer, struct arm_smmu_test_writer, writer); + __le64 *entry_used_bits; + + entry_used_bits = kunit_kzalloc( + test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS, + GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits); + + pr_debug("STE value is now set to: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, + test_writer->entry, + NUM_ENTRY_QWORDS * sizeof(*test_writer->entry), + false); + + test_writer->num_syncs += 1; + if (!test_writer->entry[0]) { + test_writer->invalid_entry_written = true; + } else { + /* + * At any stage in a hitless transition, the entry must be + * equivalent to either the initial entry or the target entry + * when only considering the bits used by the current + * configuration. 
+ */ + writer->ops->get_used(test_writer->entry, entry_used_bits); + KUNIT_EXPECT_FALSE( + test_writer->test, + arm_smmu_entry_differs_in_used_bits( + test_writer->entry, entry_used_bits, + test_writer->init_entry, NUM_ENTRY_QWORDS) && + arm_smmu_entry_differs_in_used_bits( + test_writer->entry, entry_used_bits, + test_writer->target_entry, + NUM_ENTRY_QWORDS)); + } +} + +static void +arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer, + const __le64 *ste) +{ + __le64 used_bits[NUM_ENTRY_QWORDS] = {}; + + arm_smmu_get_ste_used(ste, used_bits); + pr_debug("STE used bits: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, used_bits, + sizeof(used_bits), false); +} + +static const struct arm_smmu_entry_writer_ops test_ste_ops = { + .sync = arm_smmu_test_writer_record_syncs, + .get_used = arm_smmu_get_ste_used, +}; + +static const struct arm_smmu_entry_writer_ops test_cd_ops = { + .sync = arm_smmu_test_writer_record_syncs, + .get_used = arm_smmu_get_cd_used, +}; + +static void arm_smmu_v3_test_ste_expect_transition( + struct kunit *test, const struct arm_smmu_ste *cur, + const struct arm_smmu_ste *target, unsigned int num_syncs_expected, + bool hitless) +{ + struct arm_smmu_ste cur_copy = *cur; + struct arm_smmu_test_writer test_writer = { + .writer = { + .ops = &test_ste_ops, + }, + .test = test, + .init_entry = cur->data, + .target_entry = target->data, + .entry = cur_copy.data, + .num_syncs = 0, + .invalid_entry_written = false, + + }; + + pr_debug("STE initial value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data); + pr_debug("STE target value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, + target->data); + + arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data); + + KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless); + KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected); + KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy)); +} + +static void arm_smmu_v3_test_ste_expect_non_hitless_transition( + struct kunit *test, const struct arm_smmu_ste *cur, + const struct arm_smmu_ste *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_ste_expect_transition(test, cur, target, + num_syncs_expected, false); +} + +static void arm_smmu_v3_test_ste_expect_hitless_transition( + struct kunit *test, const struct arm_smmu_ste *cur, + const struct arm_smmu_ste *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_ste_expect_transition(test, cur, target, + num_syncs_expected, true); +} + +static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0; + +static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste, + unsigned int s1dss, + const dma_addr_t dma_addr) +{ + struct arm_smmu_master master = { + .cd_table.cdtab_dma = dma_addr, + .cd_table.s1cdmax = 0xFF, + .cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2, + .smmu = &smmu, + }; + + arm_smmu_make_cdtable_ste(ste, &master, true, s1dss); +} + +static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test) +{ + /* + * Bypass STEs has used bits in the first two Qwords, while abort STEs + * only have used bits in the first QWord. 
Transitioning from bypass to + * abort requires two syncs: the first to set the first qword and make + * the STE into an abort, the second to clean up the second qword. + */ + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test) +{ + /* + * Transitioning from abort to bypass also requires two syncs: the first + * to set the second qword data required by the bypass STE, and the + * second to set the first qword and switch to bypass. + */ + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test) +{ + struct arm_smmu_ste ste; + struct arm_smmu_ste s1dss_bypass; + + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, + fake_cdtab_dma_addr); + + /* + * Flipping s1dss on a CD table STE only involves changes to the second + * qword of an STE and can be done in a single write. 
+ */ + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1)); + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1)); +} + +static void +arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test) +{ + struct arm_smmu_ste s1dss_bypass; + + arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2)); +} + +static void +arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test) +{ + struct arm_smmu_ste s1dss_bypass; + + arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS, + fake_cdtab_dma_addr); + arm_smmu_v3_test_ste_expect_hitless_transition( + test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste, + bool ats_enabled) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + struct io_pgtable io_pgtable = {}; + struct arm_smmu_domain smmu_domain = { + .pgtbl_ops = &io_pgtable.ops, + }; + + io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3; + io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4; + + arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled); +} + +static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test) +{ + struct arm_smmu_ste ste; + + arm_smmu_test_make_s2_ste(&ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste, + NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test) +{ + struct arm_smmu_ste s1_ste; + struct arm_smmu_ste s2_ste; + + arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_s2_ste(&s2_ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test) +{ + struct arm_smmu_ste s1_ste; + struct arm_smmu_ste s2_ste; + + arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_s2_ste(&s2_ste, true); + arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste, + NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test) +{ + struct arm_smmu_ste ste; + struct arm_smmu_ste ste_2; + + /* + * Although no flow resembles this 
in practice, one way to force an STE + * update to be non-hitless is to change its CD table pointer as well as + * s1 dss field in the same update. + */ + arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0, + fake_cdtab_dma_addr); + arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS, + 0x4B4B4b4B4B); + arm_smmu_v3_test_ste_expect_non_hitless_transition( + test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3)); +} + +static void arm_smmu_v3_test_cd_expect_transition( + struct kunit *test, const struct arm_smmu_cd *cur, + const struct arm_smmu_cd *target, unsigned int num_syncs_expected, + bool hitless) +{ + struct arm_smmu_cd cur_copy = *cur; + struct arm_smmu_test_writer test_writer = { + .writer = { + .ops = &test_cd_ops, + }, + .test = test, + .init_entry = cur->data, + .target_entry = target->data, + .entry = cur_copy.data, + .num_syncs = 0, + .invalid_entry_written = false, + + }; + + pr_debug("CD initial value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data); + pr_debug("CD target value: "); + print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data, + sizeof(cur_copy), false); + arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, + target->data); + + arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data); + + KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless); + KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected); + KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy)); +} + +static void arm_smmu_v3_test_cd_expect_non_hitless_transition( + struct kunit *test, const struct arm_smmu_cd *cur, + const struct arm_smmu_cd *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_cd_expect_transition(test, cur, target, + num_syncs_expected, false); +} + +static void arm_smmu_v3_test_cd_expect_hitless_transition( + struct kunit *test, const struct arm_smmu_cd *cur, + const struct arm_smmu_cd *target, unsigned int num_syncs_expected) +{ + arm_smmu_v3_test_cd_expect_transition(test, cur, target, + num_syncs_expected, true); +} + +static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + struct io_pgtable io_pgtable = {}; + struct arm_smmu_domain smmu_domain = { + .pgtbl_ops = &io_pgtable.ops, + .cd = { + .asid = asid, + }, + }; + + io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2; + io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4; + io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL; + + arm_smmu_make_s1_cd(cd, &master, &smmu_domain); +} + +static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test) +{ + struct arm_smmu_cd cd = {}; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_s1_cd(&cd_2, 1997); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2)); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test) +{ + struct arm_smmu_cd cd = {}; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_s1_cd(&cd, 778); + arm_smmu_test_make_s1_cd(&cd_2, 1997); + 
arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2, + NUM_EXPECTED_SYNCS(1)); + arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd, + NUM_EXPECTED_SYNCS(1)); +} + +static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + + arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid); +} + +static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd, + unsigned int asid) +{ + struct arm_smmu_master master = { + .smmu = &smmu, + }; + + arm_smmu_make_sva_cd(cd, &master, NULL, asid); +} + +static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test) +{ + struct arm_smmu_cd cd = {}; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_sva_cd(&cd_2, 1997); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2)); + arm_smmu_v3_test_cd_expect_non_hitless_transition( + test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2)); +} + +static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test) +{ + struct arm_smmu_cd cd; + struct arm_smmu_cd cd_2; + + arm_smmu_test_make_sva_cd(&cd, 1997); + arm_smmu_test_make_sva_release_cd(&cd_2, 1997); + arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2, + NUM_EXPECTED_SYNCS(2)); + arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd, + NUM_EXPECTED_SYNCS(2)); +} + +static struct kunit_case arm_smmu_v3_test_cases[] = { + KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort), + KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort), + KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable), + KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable), + KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort), + KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass), + KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2), + KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1), + KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless), + KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear), + KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid), + KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear), + KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release), + {}, +}; + +static int arm_smmu_v3_test_suite_init(struct kunit_suite *test) +{ + arm_smmu_make_bypass_ste(&smmu, &bypass_ste); + arm_smmu_make_abort_ste(&abort_ste); + return 0; +} + +static struct kunit_suite arm_smmu_v3_test_module = { + .name = "arm-smmu-v3-kunit-test", + .suite_init = arm_smmu_v3_test_suite_init, + .test_cases = arm_smmu_v3_test_cases, +}; +kunit_test_suites(&arm_smmu_v3_test_module); + +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); +MODULE_DESCRIPTION("KUnit tests for arm-smmu-v3 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index e064fd8bec6d05bd4955efca2b99ebd7cefb740d..2478178dfda7aa390b2701c5b4b905ffd089873a 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include "arm-smmu-v3.h" #include "../../dma-iommu.h" @@ -35,6 +37,9 @@ 
module_param(disable_msipolling, bool, 0444); MODULE_PARM_DESC(disable_msipolling, "Disable MSI-based polling for CMD_SYNC completion."); +static struct iommu_ops arm_smmu_ops; +static struct iommu_dirty_ops arm_smmu_dirty_ops; + enum arm_smmu_msi_index { EVTQ_MSI_INDEX, GERROR_MSI_INDEX, @@ -42,8 +47,9 @@ enum arm_smmu_msi_index { ARM_SMMU_MAX_MSIS, }; -static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, - ioasid_t sid); +#define NUM_ENTRY_QWORDS 8 +static_assert(sizeof(struct arm_smmu_ste) == NUM_ENTRY_QWORDS * sizeof(u64)); +static_assert(sizeof(struct arm_smmu_cd) == NUM_ENTRY_QWORDS * sizeof(u64)); static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = { [EVTQ_MSI_INDEX] = { @@ -71,12 +77,6 @@ struct arm_smmu_option_prop { DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa); DEFINE_MUTEX(arm_smmu_asid_lock); -/* - * Special value used by SVA when a process dies, to quiesce a CD without - * disabling it. - */ -struct arm_smmu_ctx_desc quiet_cd = { 0 }; - static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, @@ -84,7 +84,8 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { }; static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_device *smmu); + struct arm_smmu_device *smmu, u32 flags); +static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master); static void parse_driver_options(struct arm_smmu_device *smmu) { @@ -972,44 +973,53 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid) * would be nice if this was complete according to the spec, but minimally it * has to capture the bits this driver uses. */ -static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent, - struct arm_smmu_ste *used_bits) +VISIBLE_IF_KUNIT +void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits) { - unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0])); + unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0])); - used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V); - if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V))) + used_bits[0] = cpu_to_le64(STRTAB_STE_0_V); + if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V))) return; - used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG); + used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG); /* S1 translates */ if (cfg & BIT(0)) { - used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT | - STRTAB_STE_0_S1CTXPTR_MASK | - STRTAB_STE_0_S1CDMAX); - used_bits->data[1] |= + used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT | + STRTAB_STE_0_S1CTXPTR_MASK | + STRTAB_STE_0_S1CDMAX); + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR | STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH | STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW | STRTAB_STE_1_EATS); - used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID); + used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID); + + /* + * See 13.5 Summary of attribute/permission configuration fields + * for the SHCFG behavior. 
+ */ + if (FIELD_GET(STRTAB_STE_1_S1DSS, le64_to_cpu(ent[1])) == + STRTAB_STE_1_S1DSS_BYPASS) + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG); } /* S2 translates */ if (cfg & BIT(1)) { - used_bits->data[1] |= + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG); - used_bits->data[2] |= + used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR | STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI | STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R); - used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK); + used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK); } if (cfg == STRTAB_STE_0_CFG_BYPASS) - used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG); + used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used); /* * Figure out if we can do a hitless update of entry to become target. Returns a @@ -1017,57 +1027,55 @@ static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent, * unused_update is an intermediate value of entry that has unused bits set to * their new values. */ -static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry, - const struct arm_smmu_ste *target, - struct arm_smmu_ste *unused_update) +static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer, + const __le64 *entry, const __le64 *target, + __le64 *unused_update) { - struct arm_smmu_ste target_used = {}; - struct arm_smmu_ste cur_used = {}; + __le64 target_used[NUM_ENTRY_QWORDS] = {}; + __le64 cur_used[NUM_ENTRY_QWORDS] = {}; u8 used_qword_diff = 0; unsigned int i; - arm_smmu_get_ste_used(entry, &cur_used); - arm_smmu_get_ste_used(target, &target_used); + writer->ops->get_used(entry, cur_used); + writer->ops->get_used(target, target_used); - for (i = 0; i != ARRAY_SIZE(target_used.data); i++) { + for (i = 0; i != NUM_ENTRY_QWORDS; i++) { /* * Check that masks are up to date, the make functions are not * allowed to set a bit to 1 if the used function doesn't say it * is used. */ - WARN_ON_ONCE(target->data[i] & ~target_used.data[i]); + WARN_ON_ONCE(target[i] & ~target_used[i]); /* Bits can change because they are not currently being used */ - unused_update->data[i] = (entry->data[i] & cur_used.data[i]) | - (target->data[i] & ~cur_used.data[i]); + unused_update[i] = (entry[i] & cur_used[i]) | + (target[i] & ~cur_used[i]); /* * Each bit indicates that a used bit in a qword needs to be * changed after unused_update is applied. */ - if ((unused_update->data[i] & target_used.data[i]) != - target->data[i]) + if ((unused_update[i] & target_used[i]) != target[i]) used_qword_diff |= 1 << i; } return used_qword_diff; } -static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid, - struct arm_smmu_ste *entry, - const struct arm_smmu_ste *target, unsigned int start, +static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry, + const __le64 *target, unsigned int start, unsigned int len) { bool changed = false; unsigned int i; for (i = start; len != 0; len--, i++) { - if (entry->data[i] != target->data[i]) { - WRITE_ONCE(entry->data[i], target->data[i]); + if (entry[i] != target[i]) { + WRITE_ONCE(entry[i], target[i]); changed = true; } } if (changed) - arm_smmu_sync_ste_for_sid(smmu, sid); + writer->ops->sync(writer); return changed; } @@ -1097,24 +1105,22 @@ static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid, * V=0 process. This relies on the IGNORED behavior described in the * specification. 
*/ -static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid, - struct arm_smmu_ste *entry, - const struct arm_smmu_ste *target) +VISIBLE_IF_KUNIT +void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry, + const __le64 *target) { - unsigned int num_entry_qwords = ARRAY_SIZE(target->data); - struct arm_smmu_device *smmu = master->smmu; - struct arm_smmu_ste unused_update; + __le64 unused_update[NUM_ENTRY_QWORDS]; u8 used_qword_diff; used_qword_diff = - arm_smmu_entry_qword_diff(entry, target, &unused_update); + arm_smmu_entry_qword_diff(writer, entry, target, unused_update); if (hweight8(used_qword_diff) == 1) { /* * Only one qword needs its used bits to be changed. This is a - * hitless update, update all bits the current STE is ignoring - * to their new values, then update a single "critical qword" to - * change the STE and finally 0 out any bits that are now unused - * in the target configuration. + * hitless update, update all bits the current STE/CD is + * ignoring to their new values, then update a single "critical + * qword" to change the STE/CD and finally 0 out any bits that + * are now unused in the target configuration. */ unsigned int critical_qword_index = ffs(used_qword_diff) - 1; @@ -1123,22 +1129,21 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid, * writing it in the next step anyways. This can save a sync * when the only change is in that qword. */ - unused_update.data[critical_qword_index] = - entry->data[critical_qword_index]; - entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords); - entry_set(smmu, sid, entry, target, critical_qword_index, 1); - entry_set(smmu, sid, entry, target, 0, num_entry_qwords); + unused_update[critical_qword_index] = + entry[critical_qword_index]; + entry_set(writer, entry, unused_update, 0, NUM_ENTRY_QWORDS); + entry_set(writer, entry, target, critical_qword_index, 1); + entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS); } else if (used_qword_diff) { /* * At least two qwords need their inuse bits to be changed. This * requires a breaking update, zero the V bit, write all qwords * but 0, then set qword 0 */ - unused_update.data[0] = entry->data[0] & - cpu_to_le64(~STRTAB_STE_0_V); - entry_set(smmu, sid, entry, &unused_update, 0, 1); - entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1); - entry_set(smmu, sid, entry, target, 0, 1); + unused_update[0] = 0; + entry_set(writer, entry, unused_update, 0, 1); + entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1); + entry_set(writer, entry, target, 0, 1); } else { /* * No inuse bit changed. Sanity check that all unused bits are 0 @@ -1146,20 +1151,10 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid, * compute_qword_diff(). 
*/ WARN_ON_ONCE( - entry_set(smmu, sid, entry, target, 0, num_entry_qwords)); - } - - /* It's likely that we'll want to use the new STE soon */ - if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) { - struct arm_smmu_cmdq_ent - prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG, - .prefetch = { - .sid = sid, - } }; - - arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS)); } } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry); static void arm_smmu_sync_cd(struct arm_smmu_master *master, int ssid, bool leaf) @@ -1205,119 +1200,181 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst, u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V; - /* See comment in arm_smmu_write_ctx_desc() */ + /* The HW has 64 bit atomicity with stores to the L2 CD table */ WRITE_ONCE(*dst, cpu_to_le64(val)); } -static struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, - u32 ssid) +struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, + u32 ssid) { - __le64 *l1ptr; - unsigned int idx; struct arm_smmu_l1_ctx_desc *l1_desc; - struct arm_smmu_device *smmu = master->smmu; struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; + if (!cd_table->cdtab) + return NULL; + if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR) return (struct arm_smmu_cd *)(cd_table->cdtab + ssid * CTXDESC_CD_DWORDS); - idx = ssid >> CTXDESC_SPLIT; - l1_desc = &cd_table->l1_desc[idx]; - if (!l1_desc->l2ptr) { - if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) + l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES]; + if (!l1_desc->l2ptr) + return NULL; + return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES]; +} + +static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master, + u32 ssid) +{ + struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; + struct arm_smmu_device *smmu = master->smmu; + + might_sleep(); + iommu_group_mutex_assert(master->dev); + + if (!cd_table->cdtab) { + if (arm_smmu_alloc_cd_tables(master)) return NULL; + } + + if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) { + unsigned int idx = ssid / CTXDESC_L2_ENTRIES; + struct arm_smmu_l1_ctx_desc *l1_desc; - l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS; - arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); - /* An invalid L1CD can be cached */ - arm_smmu_sync_cd(master, ssid, false); + l1_desc = &cd_table->l1_desc[idx]; + if (!l1_desc->l2ptr) { + __le64 *l1ptr; + + if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc)) + return NULL; + + l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS; + arm_smmu_write_cd_l1_desc(l1ptr, l1_desc); + /* An invalid L1CD can be cached */ + arm_smmu_sync_cd(master, ssid, false); + } } - idx = ssid & (CTXDESC_L2_ENTRIES - 1); - return &l1_desc->l2ptr[idx]; + return arm_smmu_get_cd_ptr(master, ssid); } -int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid, - struct arm_smmu_ctx_desc *cd) +struct arm_smmu_cd_writer { + struct arm_smmu_entry_writer writer; + unsigned int ssid; +}; + +VISIBLE_IF_KUNIT +void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits) { + used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V); + if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V))) + return; + memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd)); + /* - * This function handles the following cases: - * - * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0). - * (2) Install a secondary CD, for SID+SSID traffic. - * (3) Update ASID of a CD. 
Atomically write the first 64 bits of the - * CD, then invalidate the old entry and mappings. - * (4) Quiesce the context without clearing the valid bit. Disable - * translation, and ignore any translation fault. - * (5) Remove a secondary CD. + * If EPD0 is set by the make function it means + * T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED */ - u64 val; - bool cd_live; - struct arm_smmu_cd *cdptr; - struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - struct arm_smmu_device *smmu = master->smmu; + if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) { + used_bits[0] &= ~cpu_to_le64( + CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 | + CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 | + CTXDESC_CD_0_TCR_SH0); + used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK); + } +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used); - if (WARN_ON(ssid >= (1 << cd_table->s1cdmax))) - return -E2BIG; +static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer) +{ + struct arm_smmu_cd_writer *cd_writer = + container_of(writer, struct arm_smmu_cd_writer, writer); - cdptr = arm_smmu_get_cd_ptr(master, ssid); - if (!cdptr) - return -ENOMEM; + arm_smmu_sync_cd(writer->master, cd_writer->ssid, true); +} - val = le64_to_cpu(cdptr->data[0]); - cd_live = !!(val & CTXDESC_CD_0_V); - - if (!cd) { /* (5) */ - val = 0; - } else if (cd == &quiet_cd) { /* (4) */ - if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) - val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R); - val |= CTXDESC_CD_0_TCR_EPD0; - } else if (cd_live) { /* (3) */ - val &= ~CTXDESC_CD_0_ASID; - val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid); - /* - * Until CD+TLB invalidation, both ASIDs may be used for tagging - * this substream's traffic - */ - } else { /* (1) and (2) */ - cdptr->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK); - cdptr->data[2] = 0; - cdptr->data[3] = cpu_to_le64(cd->mair); +static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = { + .sync = arm_smmu_cd_writer_sync_entry, + .get_used = arm_smmu_get_cd_used, +}; - /* - * STE may be live, and the SMMU might read dwords of this CD in any - * order. Ensure that it observes valid values before reading - * V=1. 
- */ - arm_smmu_sync_cd(master, ssid, true); +void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid, + struct arm_smmu_cd *cdptr, + const struct arm_smmu_cd *target) +{ + bool target_valid = target->data[0] & cpu_to_le64(CTXDESC_CD_0_V); + bool cur_valid = cdptr->data[0] & cpu_to_le64(CTXDESC_CD_0_V); + struct arm_smmu_cd_writer cd_writer = { + .writer = { + .ops = &arm_smmu_cd_writer_ops, + .master = master, + }, + .ssid = ssid, + }; + + if (ssid != IOMMU_NO_PASID && cur_valid != target_valid) { + if (cur_valid) + master->cd_table.used_ssids--; + else + master->cd_table.used_ssids++; + } + + arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data); +} - val = cd->tcr | +void arm_smmu_make_s1_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain) +{ + struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; + const struct io_pgtable_cfg *pgtbl_cfg = + &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; + typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = + &pgtbl_cfg->arm_lpae_s1_cfg.tcr; + + memset(target, 0, sizeof(*target)); + + target->data[0] = cpu_to_le64( + FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | + FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | + FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | + FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | + FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | #ifdef __BIG_ENDIAN - CTXDESC_CD_0_ENDI | + CTXDESC_CD_0_ENDI | #endif - CTXDESC_CD_0_R | CTXDESC_CD_0_A | - (cd->mm ? 0 : CTXDESC_CD_0_ASET) | - CTXDESC_CD_0_AA64 | - FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) | - CTXDESC_CD_0_V; - - if (cd_table->stall_enabled) - val |= CTXDESC_CD_0_S; - } + CTXDESC_CD_0_TCR_EPD1 | + CTXDESC_CD_0_V | + FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | + CTXDESC_CD_0_AA64 | + (master->stall_enabled ? CTXDESC_CD_0_S : 0) | + CTXDESC_CD_0_R | + CTXDESC_CD_0_A | + CTXDESC_CD_0_ASET | + FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) + ); + + /* To enable dirty flag update, set both Access flag and dirty state update */ + if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_HD) + target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA | + CTXDESC_CD_0_TCR_HD); + + target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr & + CTXDESC_CD_1_TTB0_MASK); + target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair); +} +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd); + +void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid) +{ + struct arm_smmu_cd target = {}; + struct arm_smmu_cd *cdptr; - /* - * The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3 - * "Configuration structures and configuration invalidation completion" - * - * The size of single-copy atomic reads made by the SMMU is - * IMPLEMENTATION DEFINED but must be at least 64 bits. Any single - * field within an aligned 64-bit span of a structure can be altered - * without first making the structure invalid. 
- */ - WRITE_ONCE(cdptr->data[0], cpu_to_le64(val)); - arm_smmu_sync_cd(master, ssid, true); - return 0; + if (!master->cd_table.cdtab) + return; + cdptr = arm_smmu_get_cd_ptr(master, ssid); + if (WARN_ON(!cdptr)) + return; + arm_smmu_write_cd_entry(master, ssid, cdptr, &target); } static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master) @@ -1328,7 +1385,6 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master) struct arm_smmu_device *smmu = master->smmu; struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; - cd_table->stall_enabled = master->stall_enabled; cd_table->s1cdmax = master->ssid_bits; max_contexts = 1 << cd_table->s1cdmax; @@ -1401,58 +1457,83 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_master *master) cd_table->cdtab = NULL; } -bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd) -{ - bool free; - struct arm_smmu_ctx_desc *old_cd; - - if (!cd->asid) - return false; - - free = refcount_dec_and_test(&cd->refs); - if (free) { - old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid); - WARN_ON(old_cd != cd); - } - return free; -} - /* Stream table manipulation functions */ -static void -arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) +static void arm_smmu_write_strtab_l1_desc(__le64 *dst, dma_addr_t l2ptr_dma) { u64 val = 0; - val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); - val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; + val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, STRTAB_SPLIT + 1); + val |= l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; - /* See comment in arm_smmu_write_ctx_desc() */ + /* The HW has 64 bit atomicity with stores to the L2 STE table */ WRITE_ONCE(*dst, cpu_to_le64(val)); } -static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) +struct arm_smmu_ste_writer { + struct arm_smmu_entry_writer writer; + u32 sid; +}; + +static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer) { + struct arm_smmu_ste_writer *ste_writer = + container_of(writer, struct arm_smmu_ste_writer, writer); struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_STE, .cfgi = { - .sid = sid, + .sid = ste_writer->sid, .leaf = true, }, }; - arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); + arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd); } -static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target) +static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = { + .sync = arm_smmu_ste_writer_sync_entry, + .get_used = arm_smmu_get_ste_used, +}; + +static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid, + struct arm_smmu_ste *ste, + const struct arm_smmu_ste *target) +{ + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_ste_writer ste_writer = { + .writer = { + .ops = &arm_smmu_ste_writer_ops, + .master = master, + }, + .sid = sid, + }; + + arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data); + + /* It's likely that we'll want to use the new STE soon */ + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) { + struct arm_smmu_cmdq_ent + prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG, + .prefetch = { + .sid = sid, + } }; + + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + } +} + +VISIBLE_IF_KUNIT +void arm_smmu_make_abort_ste(struct arm_smmu_ste *target) { memset(target, 0, sizeof(*target)); target->data[0] = cpu_to_le64( STRTAB_STE_0_V | FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT)); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste); -static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, - struct 
arm_smmu_ste *target) +VISIBLE_IF_KUNIT +void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, + struct arm_smmu_ste *target) { memset(target, 0, sizeof(*target)); target->data[0] = cpu_to_le64( @@ -1463,9 +1544,12 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste); -static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, - struct arm_smmu_master *master) +VISIBLE_IF_KUNIT +void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, bool ats_enabled, + unsigned int s1dss) { struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; struct arm_smmu_device *smmu = master->smmu; @@ -1479,7 +1563,7 @@ static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax)); target->data[1] = cpu_to_le64( - FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | + FIELD_PREP(STRTAB_STE_1_S1DSS, s1dss) | FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | @@ -1488,7 +1572,12 @@ static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, STRTAB_STE_1_S1STALLD : 0) | FIELD_PREP(STRTAB_STE_1_EATS, - master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); + ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); + + if ((smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) && + s1dss == STRTAB_STE_1_S1DSS_BYPASS) + target->data[1] |= cpu_to_le64(FIELD_PREP( + STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); if (smmu->features & ARM_SMMU_FEAT_E2H) { /* @@ -1513,10 +1602,13 @@ static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0)); } } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste); -static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, - struct arm_smmu_master *master, - struct arm_smmu_domain *smmu_domain) +VISIBLE_IF_KUNIT +void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, + bool ats_enabled) { struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; const struct io_pgtable_cfg *pgtbl_cfg = @@ -1533,7 +1625,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, target->data[1] = cpu_to_le64( FIELD_PREP(STRTAB_STE_1_EATS, - master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); + ats_enabled ? 
STRTAB_STE_1_EATS_TRANS : 0)); if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, @@ -1559,6 +1651,7 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr & STRTAB_STE_3_S2TTB_MASK); } +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste); /* * This can safely directly manipulate the STE memory without a sync sequence @@ -1579,6 +1672,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) { size_t size; void *strtab; + dma_addr_t l2ptr_dma; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT]; @@ -1588,8 +1682,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; - desc->span = STRTAB_SPLIT + 1; - desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma, + desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &l2ptr_dma, GFP_KERNEL); if (!desc->l2ptr) { dev_err(smmu->dev, @@ -1599,7 +1692,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) } arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT); - arm_smmu_write_strtab_l1_desc(strtab, desc); + arm_smmu_write_strtab_l1_desc(strtab, l2ptr_dma); return 0; } @@ -1930,13 +2023,14 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, cmd->atc.size = log2_span; } -static int arm_smmu_atc_inv_master(struct arm_smmu_master *master) +static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, + ioasid_t ssid) { int i; struct arm_smmu_cmdq_ent cmd; struct arm_smmu_cmdq_batch cmds; - arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd); + arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd); cmds.num = 0; for (i = 0; i < master->num_streams; i++) { @@ -1947,13 +2041,13 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master) return arm_smmu_cmdq_batch_submit(master->smmu, &cmds); } -int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, +int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, unsigned long iova, size_t size) { + struct arm_smmu_master_domain *master_domain; int i; unsigned long flags; struct arm_smmu_cmdq_ent cmd; - struct arm_smmu_master *master; struct arm_smmu_cmdq_batch cmds; if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) @@ -1976,15 +2070,18 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, if (!atomic_read(&smmu_domain->nr_ats_masters)) return 0; - arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); - cmds.num = 0; spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_for_each_entry(master, &smmu_domain->devices, domain_head) { + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + struct arm_smmu_master *master = master_domain->master; + if (!master->ats_enabled) continue; + arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size, &cmd); + for (i = 0; i < master->num_streams; i++) { cmd.atc.sid = master->streams[i].id; arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); @@ -2016,7 +2113,7 @@ static void arm_smmu_tlb_inv_context(void *cookie) cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd); } - arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0); + arm_smmu_atc_inv_domain(smmu_domain, 0, 0); } static void __arm_smmu_tlb_inv_range(struct 
arm_smmu_cmdq_ent *cmd, @@ -2114,7 +2211,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size, * Unfortunately, this can't be leaf-only since we may have * zapped an entire table. */ - arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size); + arm_smmu_atc_inv_domain(smmu_domain, iova, size); } void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, @@ -2155,6 +2252,13 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_add_page = arm_smmu_tlb_inv_page_nosync, }; +static bool arm_smmu_dbm_capable(struct arm_smmu_device *smmu) +{ + u32 features = (ARM_SMMU_FEAT_HD | ARM_SMMU_FEAT_COHERENCY); + + return (smmu->features & features) == features; +} + /* IOMMU API */ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) { @@ -2167,17 +2271,26 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap) case IOMMU_CAP_NOEXEC: case IOMMU_CAP_DEFERRED_FLUSH: return true; + case IOMMU_CAP_DIRTY_TRACKING: + return arm_smmu_dbm_capable(master->smmu); default: return false; } } -static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) +struct arm_smmu_domain *arm_smmu_domain_alloc(void) { + struct arm_smmu_domain *smmu_domain; + + smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); + if (!smmu_domain) + return ERR_PTR(-ENOMEM); - if (type == IOMMU_DOMAIN_SVA) - return arm_smmu_sva_domain_alloc(); - return ERR_PTR(-EOPNOTSUPP); + mutex_init(&smmu_domain->init_mutex); + INIT_LIST_HEAD(&smmu_domain->devices); + spin_lock_init(&smmu_domain->devices_lock); + + return smmu_domain; } static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) @@ -2189,20 +2302,15 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) * We can't really do anything meaningful until we've added a * master. 
*/ - smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); - if (!smmu_domain) - return ERR_PTR(-ENOMEM); - - mutex_init(&smmu_domain->init_mutex); - INIT_LIST_HEAD(&smmu_domain->devices); - spin_lock_init(&smmu_domain->devices_lock); - INIT_LIST_HEAD(&smmu_domain->mmu_notifiers); + smmu_domain = arm_smmu_domain_alloc(); + if (IS_ERR(smmu_domain)) + return ERR_CAST(smmu_domain); if (dev) { struct arm_smmu_master *master = dev_iommu_priv_get(dev); int ret; - ret = arm_smmu_domain_finalise(smmu_domain, master->smmu); + ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0); if (ret) { kfree(smmu_domain); return ERR_PTR(ret); @@ -2211,7 +2319,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) return &smmu_domain->domain; } -static void arm_smmu_domain_free(struct iommu_domain *domain) +static void arm_smmu_domain_free_paging(struct iommu_domain *domain) { struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -2222,7 +2330,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { /* Prevent SVA from touching the CD while we're freeing it */ mutex_lock(&arm_smmu_asid_lock); - arm_smmu_free_asid(&smmu_domain->cd); + xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); mutex_unlock(&arm_smmu_asid_lock); } else { struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; @@ -2234,45 +2342,23 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) } static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu, - struct arm_smmu_domain *smmu_domain, - struct io_pgtable_cfg *pgtbl_cfg) + struct arm_smmu_domain *smmu_domain) { int ret; - u32 asid; + u32 asid = 0; struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; - typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr; - - refcount_set(&cd->refs, 1); /* Prevent SVA from modifying the ASID until it is written to the CD */ mutex_lock(&arm_smmu_asid_lock); - ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd, + ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); - if (ret) - goto out_unlock; - cd->asid = (u16)asid; - cd->ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; - cd->tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) | - FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) | - FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) | - FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) | - FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) | - FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) | - CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64; - cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair; - - mutex_unlock(&arm_smmu_asid_lock); - return 0; - -out_unlock: mutex_unlock(&arm_smmu_asid_lock); return ret; } static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu, - struct arm_smmu_domain *smmu_domain, - struct io_pgtable_cfg *pgtbl_cfg) + struct arm_smmu_domain *smmu_domain) { int vmid; struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; @@ -2288,16 +2374,15 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu, } static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, - struct arm_smmu_device *smmu) + struct arm_smmu_device *smmu, u32 flags) { int ret; - unsigned long ias, oas; enum io_pgtable_fmt fmt; struct io_pgtable_cfg pgtbl_cfg; struct io_pgtable_ops *pgtbl_ops; int (*finalise_stage_fn)(struct arm_smmu_device *smmu, - struct arm_smmu_domain *smmu_domain, - struct io_pgtable_cfg *pgtbl_cfg); + struct arm_smmu_domain 
*smmu_domain); + bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; /* Restrict the stage to what we can actually support */ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) @@ -2305,17 +2390,31 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) smmu_domain->stage = ARM_SMMU_DOMAIN_S1; + pgtbl_cfg = (struct io_pgtable_cfg) { + .pgsize_bitmap = smmu->pgsize_bitmap, + .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, + .tlb = &arm_smmu_flush_ops, + .iommu_dev = smmu->dev, + }; + switch (smmu_domain->stage) { - case ARM_SMMU_DOMAIN_S1: - ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; - ias = min_t(unsigned long, ias, VA_BITS); - oas = smmu->ias; + case ARM_SMMU_DOMAIN_S1: { + unsigned long ias = (smmu->features & + ARM_SMMU_FEAT_VAX) ? 52 : 48; + + pgtbl_cfg.ias = min_t(unsigned long, ias, VA_BITS); + pgtbl_cfg.oas = smmu->ias; + if (enable_dirty) + pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_HD; fmt = ARM_64_LPAE_S1; finalise_stage_fn = arm_smmu_domain_finalise_s1; break; + } case ARM_SMMU_DOMAIN_S2: - ias = smmu->ias; - oas = smmu->oas; + if (enable_dirty) + return -EOPNOTSUPP; + pgtbl_cfg.ias = smmu->ias; + pgtbl_cfg.oas = smmu->oas; fmt = ARM_64_LPAE_S2; finalise_stage_fn = arm_smmu_domain_finalise_s2; break; @@ -2323,15 +2422,6 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, return -EINVAL; } - pgtbl_cfg = (struct io_pgtable_cfg) { - .pgsize_bitmap = smmu->pgsize_bitmap, - .ias = ias, - .oas = oas, - .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, - .tlb = &arm_smmu_flush_ops, - .iommu_dev = smmu->dev, - }; - pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) return -ENOMEM; @@ -2339,8 +2429,10 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; smmu_domain->domain.geometry.force_aperture = true; + if (enable_dirty && smmu_domain->stage == ARM_SMMU_DOMAIN_S1) + smmu_domain->domain.dirty_ops = &arm_smmu_dirty_ops; - ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg); + ret = finalise_stage_fn(smmu, smmu_domain); if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); return ret; @@ -2376,6 +2468,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master, int i, j; struct arm_smmu_device *smmu = master->smmu; + master->cd_table.in_ste = + FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(target->data[0])) == + STRTAB_STE_0_CFG_S1_TRANS; + master->ste_ats_enabled = + FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(target->data[1])) == + STRTAB_STE_1_EATS_TRANS; + for (i = 0; i < master->num_streams; ++i) { u32 sid = master->streams[i].id; struct arm_smmu_ste *step = @@ -2407,46 +2506,24 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master) return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)); } -static void arm_smmu_enable_ats(struct arm_smmu_master *master, - struct arm_smmu_domain *smmu_domain) +static void arm_smmu_enable_ats(struct arm_smmu_master *master) { size_t stu; struct pci_dev *pdev; struct arm_smmu_device *smmu = master->smmu; - /* Don't enable ATS at the endpoint if it's not enabled in the STE */ - if (!master->ats_enabled) - return; - /* Smallest Translation Unit: log2 of the smallest supported granule */ stu = __ffs(smmu->pgsize_bitmap); pdev = to_pci_dev(master->dev); - atomic_inc(&smmu_domain->nr_ats_masters); /* * ATC invalidation of 
PASID 0 causes the entire ATC to be flushed. */ - arm_smmu_atc_inv_master(master); + arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); if (pci_enable_ats(pdev, stu)) dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu); } -static void arm_smmu_disable_ats(struct arm_smmu_master *master, - struct arm_smmu_domain *smmu_domain) -{ - if (!master->ats_enabled) - return; - - pci_disable_ats(to_pci_dev(master->dev)); - /* - * Ensure ATS is disabled at the endpoint before we issue the - * ATC invalidation via the SMMU. - */ - wmb(); - arm_smmu_atc_inv_master(master); - atomic_dec(&smmu_domain->nr_ats_masters); -} - static int arm_smmu_enable_pasid(struct arm_smmu_master *master) { int ret; @@ -2494,55 +2571,216 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master) pci_disable_pasid(pdev); } -static void arm_smmu_detach_dev(struct arm_smmu_master *master) +static struct arm_smmu_master_domain * +arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master *master, + ioasid_t ssid) { - struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev); - struct arm_smmu_domain *smmu_domain; + struct arm_smmu_master_domain *master_domain; + + lockdep_assert_held(&smmu_domain->devices_lock); + + list_for_each_entry(master_domain, &smmu_domain->devices, + devices_elm) { + if (master_domain->master == master && + master_domain->ssid == ssid) + return master_domain; + } + return NULL; +} + +/* + * If the domain uses the smmu_domain->devices list return the arm_smmu_domain + * structure, otherwise NULL. These domains track attached devices so they can + * issue invalidations. + */ +static struct arm_smmu_domain * +to_smmu_domain_devices(struct iommu_domain *domain) +{ + /* The domain can be NULL only when processing the first attach */ + if (!domain) + return NULL; + if ((domain->type & __IOMMU_DOMAIN_PAGING) || + domain->type == IOMMU_DOMAIN_SVA) + return to_smmu_domain(domain); + return NULL; +} + +static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, + struct iommu_domain *domain, + ioasid_t ssid) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain_devices(domain); + struct arm_smmu_master_domain *master_domain; unsigned long flags; - if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING)) + if (!smmu_domain) return; - smmu_domain = to_smmu_domain(domain); - arm_smmu_disable_ats(master, smmu_domain); - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_del_init(&master->domain_head); + master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid); + if (master_domain) { + list_del(&master_domain->devices_elm); + kfree(master_domain); + if (master->ats_enabled) + atomic_dec(&smmu_domain->nr_ats_masters); + } spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); +} - master->ats_enabled = false; +struct arm_smmu_attach_state { + /* Inputs */ + struct iommu_domain *old_domain; + struct arm_smmu_master *master; + bool cd_needs_ats; + ioasid_t ssid; + /* Resulting state */ + bool ats_enabled; +}; + +/* + * Start the sequence to attach a domain to a master. The sequence contains three + * steps: + * arm_smmu_attach_prepare() + * arm_smmu_install_ste_for_dev() + * arm_smmu_attach_commit() + * + * If prepare succeeds then the sequence must be completed. The STE installed + * must set the STE.EATS field according to state.ats_enabled. + * + * If the device supports ATS then this determines if EATS should be enabled + * in the STE, and starts sequencing EATS disable if required. 
+ * + * The change of the EATS in the STE and the PCI ATS config space is managed by + * this sequence to be in the right order so that if PCI ATS is enabled then + * STE.EATS is enabled. + * + * new_domain can be a non-paging domain. In this case ATS will not be enabled, + * and invalidations won't be tracked. + */ +static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, + struct iommu_domain *new_domain) +{ + struct arm_smmu_master *master = state->master; + struct arm_smmu_master_domain *master_domain; + struct arm_smmu_domain *smmu_domain = + to_smmu_domain_devices(new_domain); + unsigned long flags; + + /* + * arm_smmu_share_asid() must not see two domains pointing to the same + * arm_smmu_master_domain contents, otherwise it could randomly write one + * or the other to the CD. + */ + lockdep_assert_held(&arm_smmu_asid_lock); + + if (smmu_domain || state->cd_needs_ats) { + /* + * The SMMU does not support enabling ATS with bypass/abort. + * When the STE is in bypass (STE.Config[2:0] == 0b100), ATS + * Translation Requests and Translated transactions are denied + * as though ATS is disabled for the stream (STE.EATS == 0b00), + * causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events + * (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be + * enabled if we have an arm_smmu_domain, as those always have page + * tables. + */ + state->ats_enabled = arm_smmu_ats_supported(master); + } + + if (smmu_domain) { + master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL); + if (!master_domain) + return -ENOMEM; + master_domain->master = master; + master_domain->ssid = state->ssid; + + /* + * During prepare we want the current smmu_domain and new + * smmu_domain to be in the devices list before we change any + * HW. This ensures that both domains will send ATS + * invalidations to the master until we are done. + * + * It is tempting to make this list only track masters that are + * using ATS, but arm_smmu_share_asid() also uses this to change + * the ASID of a domain, unrelated to ATS. + * + * Notice that if we are re-attaching the same domain then the list + * will have two identical entries and commit will remove only + * one of them. + */ + spin_lock_irqsave(&smmu_domain->devices_lock, flags); + if (state->ats_enabled) + atomic_inc(&smmu_domain->nr_ats_masters); + list_add(&master_domain->devices_elm, &smmu_domain->devices); + spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + } + + if (!state->ats_enabled && master->ats_enabled) { + pci_disable_ats(to_pci_dev(master->dev)); + /* + * This is probably overkill, but the config write for disabling + * ATS should complete before the STE is configured to generate + * UR to avoid AER noise. + */ + wmb(); + } + return 0; +} + +/* + * Commit is done after the STE/CD are configured with the EATS setting. It + * completes synchronizing the PCI device's ATC and finishes manipulating the + * smmu_domain->devices list. + */ +static void arm_smmu_attach_commit(struct arm_smmu_attach_state *state) +{ + struct arm_smmu_master *master = state->master; + + lockdep_assert_held(&arm_smmu_asid_lock); + + if (state->ats_enabled && !master->ats_enabled) { + arm_smmu_enable_ats(master); + } else if (state->ats_enabled && master->ats_enabled) { + /* + * The translation has changed, flush the ATC. At this point the + * SMMU is translating for the new domain and both the old and new + * domain will issue invalidations. 
+ */ + arm_smmu_atc_inv_master(master, state->ssid); + } else if (!state->ats_enabled && master->ats_enabled) { + /* ATS is being switched off, invalidate the entire ATC */ + arm_smmu_atc_inv_master(master, IOMMU_NO_PASID); + } + master->ats_enabled = state->ats_enabled; + + arm_smmu_remove_master_domain(master, state->old_domain, state->ssid); } static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = 0; - unsigned long flags; struct arm_smmu_ste target; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct arm_smmu_device *smmu; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_attach_state state = { + .old_domain = iommu_get_domain_for_dev(dev), + .ssid = IOMMU_NO_PASID, + }; struct arm_smmu_master *master; + struct arm_smmu_cd *cdptr; if (!fwspec) return -ENOENT; - master = dev_iommu_priv_get(dev); + state.master = master = dev_iommu_priv_get(dev); smmu = master->smmu; - /* - * Checking that SVA is disabled ensures that this device isn't bound to - * any mm, and can be safely detached from its old domain. Bonds cannot - * be removed concurrently since we're holding the group mutex. - */ - if (arm_smmu_master_sva_enabled(master)) { - dev_err(dev, "cannot attach - SVA enabled\n"); - return -EBUSY; - } - mutex_lock(&smmu_domain->init_mutex); if (!smmu_domain->smmu) { - ret = arm_smmu_domain_finalise(smmu_domain, smmu); + ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0); } else if (smmu_domain->smmu != smmu) ret = -EINVAL; @@ -2550,6 +2788,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) if (ret) return ret; + if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { + cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID); + if (!cdptr) + return -ENOMEM; + } else if (arm_smmu_ssids_in_use(&master->cd_table)) + return -EBUSY; + /* * Prevent arm_smmu_share_asid() from trying to change the ASID * of either the old or new domain while we are working on it. @@ -2558,67 +2803,185 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) */ mutex_lock(&arm_smmu_asid_lock); - arm_smmu_detach_dev(master); - - master->ats_enabled = arm_smmu_ats_supported(master); - - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_add(&master->domain_head, &smmu_domain->devices); - spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + ret = arm_smmu_attach_prepare(&state, domain); + if (ret) { + mutex_unlock(&arm_smmu_asid_lock); + return ret; + } switch (smmu_domain->stage) { - case ARM_SMMU_DOMAIN_S1: - if (!master->cd_table.cdtab) { - ret = arm_smmu_alloc_cd_tables(master); - if (ret) - goto out_list_del; - } else { - /* - * arm_smmu_write_ctx_desc() relies on the entry being - * invalid to work, clear any existing entry. 
- */ - ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, - NULL); - if (ret) - goto out_list_del; - } - - ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd); - if (ret) - goto out_list_del; - - arm_smmu_make_cdtable_ste(&target, master); + case ARM_SMMU_DOMAIN_S1: { + struct arm_smmu_cd target_cd; + + arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); + arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, + &target_cd); + arm_smmu_make_cdtable_ste(&target, master, state.ats_enabled, + STRTAB_STE_1_S1DSS_SSID0); arm_smmu_install_ste_for_dev(master, &target); break; + } case ARM_SMMU_DOMAIN_S2: - arm_smmu_make_s2_domain_ste(&target, master, smmu_domain); + arm_smmu_make_s2_domain_ste(&target, master, smmu_domain, + state.ats_enabled); arm_smmu_install_ste_for_dev(master, &target); - if (master->cd_table.cdtab) - arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, - NULL); + arm_smmu_clear_cd(master, IOMMU_NO_PASID); break; } - arm_smmu_enable_ats(master, smmu_domain); - goto out_unlock; + arm_smmu_attach_commit(&state); + mutex_unlock(&arm_smmu_asid_lock); + return 0; +} -out_list_del: - spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_del_init(&master->domain_head); - spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); +static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t id) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_device *smmu = master->smmu; + struct arm_smmu_cd target_cd; + int ret = 0; + + mutex_lock(&smmu_domain->init_mutex); + if (!smmu_domain->smmu) + ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0); + else if (smmu_domain->smmu != smmu) + ret = -EINVAL; + mutex_unlock(&smmu_domain->init_mutex); + if (ret) + return ret; + + if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + return -EINVAL; + + /* + * We can read cd.asid outside the lock because arm_smmu_set_pasid() + * will fix it + */ + arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); + return arm_smmu_set_pasid(master, to_smmu_domain(domain), id, + &target_cd); +} + +static void arm_smmu_update_ste(struct arm_smmu_master *master, + struct iommu_domain *sid_domain, + bool ats_enabled) +{ + unsigned int s1dss = STRTAB_STE_1_S1DSS_TERMINATE; + struct arm_smmu_ste ste; + + if (master->cd_table.in_ste && master->ste_ats_enabled == ats_enabled) + return; + + if (sid_domain->type == IOMMU_DOMAIN_IDENTITY) + s1dss = STRTAB_STE_1_S1DSS_BYPASS; + else + WARN_ON(sid_domain->type != IOMMU_DOMAIN_BLOCKED); + + /* + * Change the STE into a cdtable one with SID IDENTITY/BLOCKED behavior + * using s1dss if necessary. If the cd_table is already installed then + * the S1DSS is correct and this will just update the EATS. Otherwise it + * installs the entire thing. This will be hitless. + */ + arm_smmu_make_cdtable_ste(&ste, master, ats_enabled, s1dss); + arm_smmu_install_ste_for_dev(master, &ste); +} + +int arm_smmu_set_pasid(struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, ioasid_t pasid, + struct arm_smmu_cd *cd) +{ + struct iommu_domain *sid_domain = iommu_get_domain_for_dev(master->dev); + struct arm_smmu_attach_state state = { + .master = master, + /* + * For now the core code prevents calling this when a domain is + * already attached, no need to set old_domain. 
+ */ + .ssid = pasid, + }; + struct arm_smmu_cd *cdptr; + int ret; + + /* The core code validates pasid */ + + if (smmu_domain->smmu != master->smmu) + return -EINVAL; + + if (!master->cd_table.in_ste && + sid_domain->type != IOMMU_DOMAIN_IDENTITY && + sid_domain->type != IOMMU_DOMAIN_BLOCKED) + return -EINVAL; + + cdptr = arm_smmu_alloc_cd_ptr(master, pasid); + if (!cdptr) + return -ENOMEM; + + mutex_lock(&arm_smmu_asid_lock); + ret = arm_smmu_attach_prepare(&state, &smmu_domain->domain); + if (ret) + goto out_unlock; + + /* + * We don't want to obtain the asid_lock too early, so fix up the + * caller-set ASID under the lock in case it changed. + */ + cd->data[0] &= ~cpu_to_le64(CTXDESC_CD_0_ASID); + cd->data[0] |= cpu_to_le64( + FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid)); + + arm_smmu_write_cd_entry(master, pasid, cdptr, cd); + arm_smmu_update_ste(master, sid_domain, state.ats_enabled); + + arm_smmu_attach_commit(&state); out_unlock: mutex_unlock(&arm_smmu_asid_lock); return ret; } -static int arm_smmu_attach_dev_ste(struct device *dev, - struct arm_smmu_ste *ste) +static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) { struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_domain *smmu_domain; - if (arm_smmu_master_sva_enabled(master)) - return -EBUSY; + smmu_domain = to_smmu_domain(domain); + + mutex_lock(&arm_smmu_asid_lock); + arm_smmu_clear_cd(master, pasid); + if (master->ats_enabled) + arm_smmu_atc_inv_master(master, pasid); + arm_smmu_remove_master_domain(master, &smmu_domain->domain, pasid); + mutex_unlock(&arm_smmu_asid_lock); + + /* + * When the last user of the CD table goes away, downgrade the STE back + * to a non-cd_table one. + */ + if (!arm_smmu_ssids_in_use(&master->cd_table)) { + struct iommu_domain *sid_domain = + iommu_get_domain_for_dev(master->dev); + + if (sid_domain->type == IOMMU_DOMAIN_IDENTITY || + sid_domain->type == IOMMU_DOMAIN_BLOCKED) + sid_domain->ops->attach_dev(sid_domain, dev); + } +} + +static void arm_smmu_attach_dev_ste(struct iommu_domain *domain, + struct device *dev, + struct arm_smmu_ste *ste, + unsigned int s1dss) +{ + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct arm_smmu_attach_state state = { + .master = master, + .old_domain = iommu_get_domain_for_dev(dev), + .ssid = IOMMU_NO_PASID, + }; /* * Do not allow any ASID to be changed while are working on the STE, @@ -2627,15 +2990,23 @@ mutex_lock(&arm_smmu_asid_lock); /* - * The SMMU does not support enabling ATS with bypass/abort. When the - * STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests - * and Translated transactions are denied as though ATS is disabled for - * the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and - * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry). + * If the CD table is not in use, we can use the provided STE, otherwise + * we use a cdtable STE with the provided S1DSS. */ - arm_smmu_detach_dev(master); - + if (arm_smmu_ssids_in_use(&master->cd_table)) { + /* + * If a CD table has to be present then we need to run with ATS + * on even though the RID will fail ATS queries with UR. This is + * because we have no idea what the PASIDs need. 
+ */ + state.cd_needs_ats = true; + arm_smmu_attach_prepare(&state, domain); + arm_smmu_make_cdtable_ste(ste, master, state.ats_enabled, s1dss); + } else { + arm_smmu_attach_prepare(&state, domain); + } arm_smmu_install_ste_for_dev(master, ste); + arm_smmu_attach_commit(&state); mutex_unlock(&arm_smmu_asid_lock); /* @@ -2643,9 +3014,7 @@ static int arm_smmu_attach_dev_ste(struct device *dev, * arm_smmu_domain->devices to avoid races updating the same context * descriptor from arm_smmu_share_asid(). */ - if (master->cd_table.cdtab) - arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL); - return 0; + arm_smmu_clear_cd(master, IOMMU_NO_PASID); } static int arm_smmu_attach_dev_identity(struct iommu_domain *domain, @@ -2655,7 +3024,8 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain, struct arm_smmu_master *master = dev_iommu_priv_get(dev); arm_smmu_make_bypass_ste(master->smmu, &ste); - return arm_smmu_attach_dev_ste(dev, &ste); + arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS); + return 0; } static const struct iommu_domain_ops arm_smmu_identity_ops = { @@ -2673,7 +3043,9 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain, struct arm_smmu_ste ste; arm_smmu_make_abort_ste(&ste); - return arm_smmu_attach_dev_ste(dev, &ste); + arm_smmu_attach_dev_ste(domain, dev, &ste, + STRTAB_STE_1_S1DSS_TERMINATE); + return 0; } static const struct iommu_domain_ops arm_smmu_blocked_ops = { @@ -2685,6 +3057,37 @@ static struct iommu_domain arm_smmu_blocked_domain = { .ops = &arm_smmu_blocked_ops, }; +static struct iommu_domain * +arm_smmu_domain_alloc_user(struct device *dev, u32 flags, + struct iommu_domain *parent, + const struct iommu_user_data *user_data) +{ + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING; + struct arm_smmu_domain *smmu_domain; + int ret; + + if (flags & ~PAGING_FLAGS) + return ERR_PTR(-EOPNOTSUPP); + if (parent || user_data) + return ERR_PTR(-EOPNOTSUPP); + + smmu_domain = arm_smmu_domain_alloc(); + if (!smmu_domain) + return ERR_PTR(-ENOMEM); + + smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; + smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; + ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags); + if (ret) + goto err_free; + return &smmu_domain->domain; + +err_free: + kfree(smmu_domain); + return ERR_PTR(ret); +} + static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) @@ -2849,8 +3252,6 @@ static void arm_smmu_remove_master(struct arm_smmu_master *master) kfree(master->streams); } -static struct iommu_ops arm_smmu_ops; - static struct iommu_device *arm_smmu_probe_device(struct device *dev) { int ret; @@ -2871,8 +3272,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) master->dev = dev; master->smmu = smmu; - INIT_LIST_HEAD(&master->bonds); - INIT_LIST_HEAD(&master->domain_head); dev_iommu_priv_set(dev, master); ret = arm_smmu_insert_master(smmu, master); @@ -2928,6 +3327,27 @@ static void arm_smmu_release_device(struct device *dev) kfree(master); } +static int arm_smmu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + + return ops->read_and_clear_dirty(ops, iova, size, flags, 
dirty); +} + +static int arm_smmu_set_dirty_tracking(struct iommu_domain *domain, + bool enabled) +{ + /* + * Always enabled and the dirty bitmap is cleared prior to + * set_dirty_tracking(). + */ + return 0; +} + static struct iommu_group *arm_smmu_device_group(struct device *dev) { struct iommu_group *group; @@ -3054,18 +3474,13 @@ static int arm_smmu_def_domain_type(struct device *dev) return 0; } -static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid, - struct iommu_domain *domain) -{ - arm_smmu_sva_remove_dev_pasid(domain, dev, pasid); -} - static struct iommu_ops arm_smmu_ops = { .identity_domain = &arm_smmu_identity_domain, .blocked_domain = &arm_smmu_blocked_domain, .capable = arm_smmu_capable, - .domain_alloc = arm_smmu_domain_alloc, .domain_alloc_paging = arm_smmu_domain_alloc_paging, + .domain_alloc_sva = arm_smmu_sva_domain_alloc, + .domain_alloc_user = arm_smmu_domain_alloc_user, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, @@ -3080,16 +3495,22 @@ static struct iommu_ops arm_smmu_ops = { .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, + .set_dev_pasid = arm_smmu_s1_set_dev_pasid, .map_pages = arm_smmu_map_pages, .unmap_pages = arm_smmu_unmap_pages, .flush_iotlb_all = arm_smmu_flush_iotlb_all, .iotlb_sync = arm_smmu_iotlb_sync, .iova_to_phys = arm_smmu_iova_to_phys, .enable_nesting = arm_smmu_enable_nesting, - .free = arm_smmu_domain_free, + .free = arm_smmu_domain_free_paging, } }; +static struct iommu_dirty_ops arm_smmu_dirty_ops = { + .read_and_clear_dirty = arm_smmu_read_and_clear_dirty, + .set_dirty_tracking = arm_smmu_set_dirty_tracking, +}; + /* Probing and initialisation functions */ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, struct arm_smmu_queue *q, @@ -3188,25 +3609,6 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) PRIQ_ENT_DWORDS, "priq"); } -static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) -{ - unsigned int i; - struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - void *strtab = smmu->strtab_cfg.strtab; - - cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents, - sizeof(*cfg->l1_desc), GFP_KERNEL); - if (!cfg->l1_desc) - return -ENOMEM; - - for (i = 0; i < cfg->num_l1_ents; ++i) { - arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]); - strtab += STRTAB_L1_DESC_DWORDS << 3; - } - - return 0; -} - static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { void *strtab; @@ -3242,7 +3644,12 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); cfg->strtab_base_cfg = reg; - return arm_smmu_init_l1_strtab(smmu); + cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents, + sizeof(*cfg->l1_desc), GFP_KERNEL); + if (!cfg->l1_desc) + return -ENOMEM; + + return 0; } static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) @@ -3665,6 +4072,28 @@ static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) } } +static void arm_smmu_get_httu(struct arm_smmu_device *smmu, u32 reg) +{ + u32 fw_features = smmu->features & (ARM_SMMU_FEAT_HA | ARM_SMMU_FEAT_HD); + u32 hw_features = 0; + + switch (FIELD_GET(IDR0_HTTU, reg)) { + case IDR0_HTTU_ACCESS_DIRTY: + hw_features |= ARM_SMMU_FEAT_HD; + fallthrough; + case IDR0_HTTU_ACCESS: + hw_features |= ARM_SMMU_FEAT_HA; + } + + if (smmu->dev->of_node) + smmu->features |= hw_features; + else if (hw_features != 
fw_features) + /* ACPI IORT sets the HTTU bits */ + dev_warn(smmu->dev, + "IDR0.HTTU features(0x%x) overridden by FW configuration (0x%x)\n", + hw_features, fw_features); +} + static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) { u32 reg; @@ -3725,6 +4154,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->features |= ARM_SMMU_FEAT_E2H; } + arm_smmu_get_httu(smmu, reg); + /* * The coherency feature as set by FW is used in preference to the ID * register, but warn on mismatch. @@ -3920,6 +4351,14 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) smmu->features |= ARM_SMMU_FEAT_COHERENCY; + switch (FIELD_GET(ACPI_IORT_SMMU_V3_HTTU_OVERRIDE, iort_smmu->flags)) { + case IDR0_HTTU_ACCESS_DIRTY: + smmu->features |= ARM_SMMU_FEAT_HD; + fallthrough; + case IDR0_HTTU_ACCESS: + smmu->features |= ARM_SMMU_FEAT_HA; + } + return 0; } #else diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 2da767a542ff4d58a1f60cbec494a8ee1c1c2c02..b332536a00d438cc5fc6b42aa7c3e6b0a51870e0 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -33,6 +33,9 @@ #define IDR0_ASID16 (1 << 12) #define IDR0_ATS (1 << 10) #define IDR0_HYP (1 << 9) +#define IDR0_HTTU GENMASK(7, 6) +#define IDR0_HTTU_ACCESS 1 +#define IDR0_HTTU_ACCESS_DIRTY 2 #define IDR0_COHACC (1 << 4) #define IDR0_TTF GENMASK(3, 2) #define IDR0_TTF_AARCH64 2 @@ -275,8 +278,7 @@ struct arm_smmu_ste { * 2lvl: at most 1024 L1 entries, * 1024 lazy entries per table. */ -#define CTXDESC_SPLIT 10 -#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT) +#define CTXDESC_L2_ENTRIES 1024 #define CTXDESC_L1_DESC_DWORDS 1 #define CTXDESC_L1_DESC_V (1UL << 0) @@ -302,6 +304,9 @@ struct arm_smmu_cd { #define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32) #define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38) +#define CTXDESC_CD_0_TCR_HA (1UL << 43) +#define CTXDESC_CD_0_TCR_HD (1UL << 42) + #define CTXDESC_CD_0_AA64 (1UL << 41) #define CTXDESC_CD_0_S (1UL << 44) #define CTXDESC_CD_0_R (1UL << 45) @@ -580,20 +585,11 @@ struct arm_smmu_priq { /* High-level stream table and context descriptor structures */ struct arm_smmu_strtab_l1_desc { - u8 span; - struct arm_smmu_ste *l2ptr; - dma_addr_t l2ptr_dma; }; struct arm_smmu_ctx_desc { u16 asid; - u64 ttbr; - u64 tcr; - u64 mair; - - refcount_t refs; - struct mm_struct *mm; }; struct arm_smmu_l1_ctx_desc { @@ -606,13 +602,19 @@ struct arm_smmu_ctx_desc_cfg { dma_addr_t cdtab_dma; struct arm_smmu_l1_ctx_desc *l1_desc; unsigned int num_l1_ents; + unsigned int used_ssids; + u8 in_ste; u8 s1fmt; /* log2 of the maximum number of CDs supported by this table */ u8 s1cdmax; - /* Whether CD entries in this table have the stall bit set. */ - u8 stall_enabled:1; }; +/* True if the cd table has SSIDS > 0 in use. 
*/ +static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table) +{ + return cd_table->used_ssids; +} + struct arm_smmu_s2_cfg { u16 vmid; }; @@ -654,6 +656,8 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_E2H (1 << 18) #define ARM_SMMU_FEAT_NESTING (1 << 19) #define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20) +#define ARM_SMMU_FEAT_HA (1 << 21) +#define ARM_SMMU_FEAT_HD (1 << 22) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) @@ -702,16 +706,15 @@ struct arm_smmu_stream { struct arm_smmu_master { struct arm_smmu_device *smmu; struct device *dev; - struct list_head domain_head; struct arm_smmu_stream *streams; /* Locked by the iommu core using the group mutex */ struct arm_smmu_ctx_desc_cfg cd_table; unsigned int num_streams; - bool ats_enabled; + bool ats_enabled : 1; + bool ste_ats_enabled : 1; bool stall_enabled; bool sva_enabled; bool iopf_enabled; - struct list_head bonds; unsigned int ssid_bits; }; @@ -736,10 +739,49 @@ struct arm_smmu_domain { struct iommu_domain domain; + /* List of struct arm_smmu_master_domain */ struct list_head devices; spinlock_t devices_lock; - struct list_head mmu_notifiers; + struct mmu_notifier mmu_notifier; +}; + +/* The following are exposed for testing purposes. */ +struct arm_smmu_entry_writer_ops; +struct arm_smmu_entry_writer { + const struct arm_smmu_entry_writer_ops *ops; + struct arm_smmu_master *master; +}; + +struct arm_smmu_entry_writer_ops { + void (*get_used)(const __le64 *entry, __le64 *used); + void (*sync)(struct arm_smmu_entry_writer *writer); +}; + +#if IS_ENABLED(CONFIG_KUNIT) +void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits); +void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur, + const __le64 *target); +void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits); +void arm_smmu_make_abort_ste(struct arm_smmu_ste *target); +void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, + struct arm_smmu_ste *target); +void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, bool ats_enabled, + unsigned int s1dss); +void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, + bool ats_enabled); +void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, struct mm_struct *mm, + u16 asid); +#endif + +struct arm_smmu_master_domain { + struct list_head devices_elm; + struct arm_smmu_master *master; + ioasid_t ssid; }; static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) @@ -749,16 +791,28 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) extern struct xarray arm_smmu_asid_xa; extern struct mutex arm_smmu_asid_lock; -extern struct arm_smmu_ctx_desc quiet_cd; -int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid, - struct arm_smmu_ctx_desc *cd); +struct arm_smmu_domain *arm_smmu_domain_alloc(void); + +void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid); +struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, + u32 ssid); +void arm_smmu_make_s1_cd(struct arm_smmu_cd *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain); +void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid, + struct arm_smmu_cd *cdptr, + const struct arm_smmu_cd *target); + +int arm_smmu_set_pasid(struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain, ioasid_t pasid, + struct arm_smmu_cd *cd); + 
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid); void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, size_t granule, bool leaf, struct arm_smmu_domain *smmu_domain); -bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd); -int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, +int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, unsigned long iova, size_t size); #ifdef CONFIG_ARM_SMMU_V3_SVA @@ -769,9 +823,8 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master); int arm_smmu_master_disable_sva(struct arm_smmu_master *master); bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master); void arm_smmu_sva_notifier_synchronize(void); -struct iommu_domain *arm_smmu_sva_domain_alloc(void); -void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t id); +struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm); #else /* CONFIG_ARM_SMMU_V3_SVA */ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { @@ -805,10 +858,7 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master static inline void arm_smmu_sva_notifier_synchronize(void) {} -static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void) -{ - return NULL; -} +#define arm_smmu_sva_domain_alloc NULL static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, struct device *dev, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c index 87bf522b9d2eec034d66bf6c5d144384ae7093fa..4b2994b6126df5e2b9bf73b42d204d1660ccaa81 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c @@ -200,7 +200,7 @@ static irqreturn_t nvidia_smmu_context_fault_bank(int irq, void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx); fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); - if (!(fsr & ARM_SMMU_FSR_FAULT)) + if (!(fsr & ARM_SMMU_CB_FSR_FAULT)) return IRQ_NONE; fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); @@ -221,11 +221,9 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev) unsigned int inst; irqreturn_t ret = IRQ_NONE; struct arm_smmu_device *smmu; - struct iommu_domain *domain = dev; - struct arm_smmu_domain *smmu_domain; + struct arm_smmu_domain *smmu_domain = dev; struct nvidia_smmu *nvidia; - smmu_domain = container_of(domain, struct arm_smmu_domain, domain); smmu = smmu_domain->smmu; nvidia = to_nvidia_smmu(smmu); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c index 552199cbd9e25dfba1f9db2a9799ed13dfc06ca2..548783f3f8e89fd978367afa65c473002f66e2e7 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c @@ -141,7 +141,7 @@ static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_doma writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG); fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); - if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) { + if ((fsr & ARM_SMMU_CB_FSR_FAULT) && (fsr & ARM_SMMU_CB_FSR_SS)) { u32 sctlr_orig, sctlr; /* @@ -298,7 +298,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain, arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr); fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); - if (fsr & ARM_SMMU_FSR_FAULT) { + if (fsr & ARM_SMMU_CB_FSR_FAULT) { /* Clear pending interrupts 
*/ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); @@ -306,7 +306,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain, * TBU halt takes care of resuming any stalled transcation. * Kept it here for completeness sake. */ - if (fsr & ARM_SMMU_FSR_SS) + if (fsr & ARM_SMMU_CB_FSR_SS) arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE); } @@ -320,11 +320,11 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain, phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid); fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); - if (fsr & ARM_SMMU_FSR_FAULT) { + if (fsr & ARM_SMMU_CB_FSR_FAULT) { /* Clear pending interrupts */ arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); - if (fsr & ARM_SMMU_FSR_SS) + if (fsr & ARM_SMMU_CB_FSR_SS) arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE); } @@ -383,68 +383,44 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) struct arm_smmu_domain *smmu_domain = dev; struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; struct arm_smmu_device *smmu = smmu_domain->smmu; - u32 fsr, fsynr, cbfrsynra, resume = 0; + struct arm_smmu_context_fault_info cfi; + u32 resume = 0; int idx = smmu_domain->cfg.cbndx; phys_addr_t phys_soft; - unsigned long iova; int ret, tmp; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); - fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); - if (!(fsr & ARM_SMMU_FSR_FAULT)) - return IRQ_NONE; + arm_smmu_read_context_fault_info(smmu, idx, &cfi); - fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); - iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); - cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); + if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT)) + return IRQ_NONE; if (list_empty(&tbu_list)) { - ret = report_iommu_fault(&smmu_domain->domain, NULL, iova, - fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); + ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova, + cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); if (ret == -ENOSYS) - dev_err_ratelimited(smmu->dev, - "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", - fsr, iova, fsynr, cbfrsynra, idx); + arm_smmu_print_context_fault_info(smmu, idx, &cfi); - arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr); return IRQ_HANDLED; } - phys_soft = ops->iova_to_phys(ops, iova); + phys_soft = ops->iova_to_phys(ops, cfi.iova); - tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova, - fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); + tmp = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova, + cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); if (!tmp || tmp == -EBUSY) { - dev_dbg(smmu->dev, - "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n", - iova, fsr, fsynr, idx); - dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft); ret = IRQ_HANDLED; resume = ARM_SMMU_RESUME_TERMINATE; } else { - phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr); + phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr); if (__ratelimit(&_rs)) { - dev_err(smmu->dev, - "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", - fsr, iova, fsynr, cbfrsynra, idx); - dev_err(smmu->dev, - "FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n", - fsr, - (fsr & 0x02) ? 
"TF " : "", - (fsr & 0x04) ? "AFF " : "", - (fsr & 0x08) ? "PF " : "", - (fsr & 0x10) ? "EF " : "", - (fsr & 0x20) ? "TLBMCF " : "", - (fsr & 0x40) ? "TLBLKF " : "", - (fsr & 0x80) ? "MHF " : "", - (fsr & 0x40000000) ? "SS " : "", - (fsr & 0x80000000) ? "MULTI " : "", - cbfrsynra); + arm_smmu_print_context_fault_info(smmu, idx, &cfi); dev_err(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft); @@ -478,17 +454,17 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) */ if (tmp != -EBUSY) { /* Clear the faulting FSR */ - arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr); /* Retry or terminate any stalled transactions */ - if (fsr & ARM_SMMU_FSR_SS) + if (cfi.fsr & ARM_SMMU_CB_FSR_SS) arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume); } return ret; } -static int qcom_tbu_probe(struct platform_device *pdev) +int qcom_tbu_probe(struct platform_device *pdev) { struct of_phandle_args args = { .args_count = 2 }; struct device_node *np = pdev->dev.of_node; @@ -530,18 +506,3 @@ static int qcom_tbu_probe(struct platform_device *pdev) return 0; } - -static const struct of_device_id qcom_tbu_of_match[] = { - { .compatible = "qcom,sc7280-tbu" }, - { .compatible = "qcom,sdm845-tbu" }, - { } -}; - -static struct platform_driver qcom_tbu_driver = { - .driver = { - .name = "qcom_tbu", - .of_match_table = qcom_tbu_of_match, - }, - .probe = qcom_tbu_probe, -}; -builtin_platform_driver(qcom_tbu_driver); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index ee318cd0e6f1f1ef4d80c41d2698e989f5dcd58b..601fb878d0ef25646df434c31304d7ead43c9b05 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include "arm-smmu.h" #include "arm-smmu-qcom.h" @@ -497,7 +499,8 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, /* Check to make sure qcom_scm has finished probing */ if (!qcom_scm_is_available()) - return ERR_PTR(-EPROBE_DEFER); + return ERR_PTR(dev_err_probe(smmu->dev, -EPROBE_DEFER, + "qcom_scm not ready\n")); qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL); if (!qsmmu) @@ -590,10 +593,47 @@ static struct acpi_platform_list qcom_acpi_platlist[] = { }; #endif +static int qcom_smmu_tbu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) { + ret = qcom_tbu_probe(pdev); + if (ret) + return ret; + } + + if (dev->pm_domain) { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return 0; +} + +static const struct of_device_id qcom_smmu_tbu_of_match[] = { + { .compatible = "qcom,sc7280-tbu" }, + { .compatible = "qcom,sdm845-tbu" }, + { } +}; + +static struct platform_driver qcom_smmu_tbu_driver = { + .driver = { + .name = "qcom_tbu", + .of_match_table = qcom_smmu_tbu_of_match, + }, + .probe = qcom_smmu_tbu_probe, +}; + struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) { const struct device_node *np = smmu->dev->of_node; const struct of_device_id *match; + static u8 tbu_registered; + + if (!tbu_registered++) + platform_driver_register(&qcom_smmu_tbu_driver); #ifdef CONFIG_ACPI if (np == NULL) { diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h index 9bb3ae7d62da68600181268026a19768cce9c387..3c134d1a62773ed99133510ee1f82bd08d86e623 100644 --- 
a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h @@ -34,8 +34,10 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev); #ifdef CONFIG_ARM_SMMU_QCOM_DEBUG void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu); +int qcom_tbu_probe(struct platform_device *pdev); #else static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { } +static inline int qcom_tbu_probe(struct platform_device *pdev) { return -EINVAL; } #endif #endif /* _ARM_SMMU_QCOM_H */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index bd09113cdbc652eb98c7cc6366af2ff66767ba48..a7f7c69a5627ab75d4e47d93c3abdb46bf40e851 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -183,8 +183,7 @@ static int arm_smmu_register_legacy_master(struct device *dev, it.cur_count = 1; } - err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, - &arm_smmu_ops); + err = iommu_fwspec_init(dev, NULL); if (err) return err; @@ -410,32 +409,72 @@ static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, }; + +void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx, + struct arm_smmu_context_fault_info *cfi) +{ + cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); + cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); + cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); + cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); +} + +void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx, + const struct arm_smmu_context_fault_info *cfi) +{ + dev_dbg(smmu->dev, + "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", + cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx); + + dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n", + cfi->fsr, + (cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "", + (cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "", + (u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr), + (cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "", + (cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "", + cfi->cbfrsynra); + + dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n", + cfi->fsynr, + (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr), + (cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "", + (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "", + (cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "", + (cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "", + (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "", + (cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? 
" WNR" : "", + (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr)); +} + static irqreturn_t arm_smmu_context_fault(int irq, void *dev) { - u32 fsr, fsynr, cbfrsynra; - unsigned long iova; + struct arm_smmu_context_fault_info cfi; struct arm_smmu_domain *smmu_domain = dev; struct arm_smmu_device *smmu = smmu_domain->smmu; + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); int idx = smmu_domain->cfg.cbndx; int ret; - fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); - if (!(fsr & ARM_SMMU_FSR_FAULT)) - return IRQ_NONE; + arm_smmu_read_context_fault_info(smmu, idx, &cfi); - fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); - iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); - cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx)); + if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT)) + return IRQ_NONE; - ret = report_iommu_fault(&smmu_domain->domain, NULL, iova, - fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); + ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova, + cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); - if (ret == -ENOSYS) - dev_err_ratelimited(smmu->dev, - "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", - fsr, iova, fsynr, cbfrsynra, idx); + if (ret == -ENOSYS && __ratelimit(&rs)) + arm_smmu_print_context_fault_info(smmu, idx, &cfi); - arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr); return IRQ_HANDLED; } @@ -1311,7 +1350,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va); reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR; - if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE), + if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_CB_ATSR_ACTIVE), 5, 50)) { spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); dev_err(dev, @@ -1676,7 +1715,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) /* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i < smmu->num_context_banks; ++i) { arm_smmu_write_context_bank(smmu, i); - arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); + arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT); } /* Invalidate the TLB, just in case */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index 4765c6945c344dec36d250fde94d9134c0fe194a..e2aeb511ae903302e3c15d2cf5f22e2a26ac2346 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -196,34 +196,42 @@ enum arm_smmu_cbar_type { #define ARM_SMMU_CB_PAR_F BIT(0) #define ARM_SMMU_CB_FSR 0x58 -#define ARM_SMMU_FSR_MULTI BIT(31) -#define ARM_SMMU_FSR_SS BIT(30) -#define ARM_SMMU_FSR_UUT BIT(8) -#define ARM_SMMU_FSR_ASF BIT(7) -#define ARM_SMMU_FSR_TLBLKF BIT(6) -#define ARM_SMMU_FSR_TLBMCF BIT(5) -#define ARM_SMMU_FSR_EF BIT(4) -#define ARM_SMMU_FSR_PF BIT(3) -#define ARM_SMMU_FSR_AFF BIT(2) -#define ARM_SMMU_FSR_TF BIT(1) - -#define ARM_SMMU_FSR_IGN (ARM_SMMU_FSR_AFF | \ - ARM_SMMU_FSR_ASF | \ - ARM_SMMU_FSR_TLBMCF | \ - ARM_SMMU_FSR_TLBLKF) - -#define ARM_SMMU_FSR_FAULT (ARM_SMMU_FSR_MULTI | \ - ARM_SMMU_FSR_SS | \ - ARM_SMMU_FSR_UUT | \ - ARM_SMMU_FSR_EF | \ - ARM_SMMU_FSR_PF | \ - ARM_SMMU_FSR_TF | \ - ARM_SMMU_FSR_IGN) +#define ARM_SMMU_CB_FSR_MULTI BIT(31) +#define ARM_SMMU_CB_FSR_SS BIT(30) +#define ARM_SMMU_CB_FSR_FORMAT 
GENMASK(10, 9) +#define ARM_SMMU_CB_FSR_UUT BIT(8) +#define ARM_SMMU_CB_FSR_ASF BIT(7) +#define ARM_SMMU_CB_FSR_TLBLKF BIT(6) +#define ARM_SMMU_CB_FSR_TLBMCF BIT(5) +#define ARM_SMMU_CB_FSR_EF BIT(4) +#define ARM_SMMU_CB_FSR_PF BIT(3) +#define ARM_SMMU_CB_FSR_AFF BIT(2) +#define ARM_SMMU_CB_FSR_TF BIT(1) + +#define ARM_SMMU_CB_FSR_IGN (ARM_SMMU_CB_FSR_AFF | \ + ARM_SMMU_CB_FSR_ASF | \ + ARM_SMMU_CB_FSR_TLBMCF | \ + ARM_SMMU_CB_FSR_TLBLKF) + +#define ARM_SMMU_CB_FSR_FAULT (ARM_SMMU_CB_FSR_MULTI | \ + ARM_SMMU_CB_FSR_SS | \ + ARM_SMMU_CB_FSR_UUT | \ + ARM_SMMU_CB_FSR_EF | \ + ARM_SMMU_CB_FSR_PF | \ + ARM_SMMU_CB_FSR_TF | \ + ARM_SMMU_CB_FSR_IGN) #define ARM_SMMU_CB_FAR 0x60 #define ARM_SMMU_CB_FSYNR0 0x68 -#define ARM_SMMU_FSYNR0_WNR BIT(4) +#define ARM_SMMU_CB_FSYNR0_PLVL GENMASK(1, 0) +#define ARM_SMMU_CB_FSYNR0_WNR BIT(4) +#define ARM_SMMU_CB_FSYNR0_PNU BIT(5) +#define ARM_SMMU_CB_FSYNR0_IND BIT(6) +#define ARM_SMMU_CB_FSYNR0_NSATTR BIT(8) +#define ARM_SMMU_CB_FSYNR0_PTWF BIT(10) +#define ARM_SMMU_CB_FSYNR0_AFR BIT(11) +#define ARM_SMMU_CB_FSYNR0_S1CBNDX GENMASK(23, 16) #define ARM_SMMU_CB_FSYNR1 0x6c @@ -237,7 +245,7 @@ enum arm_smmu_cbar_type { #define ARM_SMMU_CB_ATS1PR 0x800 #define ARM_SMMU_CB_ATSR 0x8f0 -#define ARM_SMMU_ATSR_ACTIVE BIT(0) +#define ARM_SMMU_CB_ATSR_ACTIVE BIT(0) #define ARM_SMMU_RESUME_TERMINATE BIT(0) @@ -533,4 +541,17 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu); void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx); int arm_mmu500_reset(struct arm_smmu_device *smmu); +struct arm_smmu_context_fault_info { + unsigned long iova; + u32 fsr; + u32 fsynr; + u32 cbfrsynra; +}; + +void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx, + struct arm_smmu_context_fault_info *cfi); + +void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx, + const struct arm_smmu_context_fault_info *cfi); + #endif /* _ARM_SMMU_H */ diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index e079bb7a993e29e869260683ca8c961b9b46e516..b98a7a598b89739daedaa218b0f63e0ff707ec77 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -194,7 +194,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev) fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR); - if (!(fsr & ARM_SMMU_FSR_FAULT)) + if (!(fsr & ARM_SMMU_CB_FSR_FAULT)) return IRQ_NONE; fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0); @@ -274,7 +274,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, /* Clear context bank fault address fault status registers */ iommu_writel(ctx, ARM_SMMU_CB_FAR, 0); - iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); + iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT); /* TTBRs */ iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 3020b3b295d2ec4dec4aae449267fbff58f5bb30..95918eeb55768e31b6e24497103529a03139e4bc 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -680,19 +680,16 @@ static void iommu_dma_init_options(struct iommu_dma_options *options, /** * iommu_dma_init_domain - Initialise a DMA mapping domain * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() - * @base: IOVA at which the mappable address space starts - * @limit: Last address of the IOVA space * @dev: Device the domain is being initialised for * - * @base and @limit + 1 should be exact multiples of IOMMU page granularity to - * avoid rounding 
surprises. If necessary, we reserve the page at address 0 + * If the geometry and dma_range_map include address 0, we reserve that page * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but * any change which could make prior IOVAs invalid will fail. */ -static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, - dma_addr_t limit, struct device *dev) +static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev) { struct iommu_dma_cookie *cookie = domain->iova_cookie; + const struct bus_dma_region *map = dev->dma_range_map; unsigned long order, base_pfn; struct iova_domain *iovad; int ret; @@ -704,19 +701,19 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, /* Use the smallest supported page size for IOVA granularity */ order = __ffs(domain->pgsize_bitmap); - base_pfn = max_t(unsigned long, 1, base >> order); + base_pfn = 1; /* Check the domain allows at least some access to the device... */ - if (domain->geometry.force_aperture) { - if (base > domain->geometry.aperture_end || - limit < domain->geometry.aperture_start) { + if (map) { + if (dma_range_map_min(map) > domain->geometry.aperture_end || + dma_range_map_max(map) < domain->geometry.aperture_start) { pr_warn("specified DMA range outside IOMMU capability\n"); return -EFAULT; } - /* ...then finally give it a kicking to make sure it fits */ - base_pfn = max_t(unsigned long, base_pfn, - domain->geometry.aperture_start >> order); } + /* ...then finally give it a kicking to make sure it fits */ + base_pfn = max_t(unsigned long, base_pfn, + domain->geometry.aperture_start >> order); /* start_pfn is always nonzero for an already-initialised domain */ mutex_lock(&cookie->mutex); @@ -962,8 +959,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev, * but an IOMMU which supports smaller pages might not map the whole thing. 
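
The reworked iommu_dma_init_domain() above no longer takes base/limit arguments: the usable range now comes from dev->dma_range_map, checked for overlap with the domain aperture, and base_pfn is clamped to aperture_start. A minimal stand-alone model of that check, using plain integers in place of the kernel's bus_dma_region and iova_domain types (all values here are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for dma_range_map_min()/dma_range_map_max() and the aperture. */
struct range { uint64_t min, max; };

static int ranges_overlap(struct range a, struct range b)
{
	return a.min <= b.max && a.max >= b.min;
}

int main(void)
{
	struct range dev_map  = { 0x0, 0xffffffffULL };       /* 32-bit capable device */
	struct range aperture = { 0x1000, (1ULL << 48) - 1 }; /* domain geometry */
	unsigned int order = 12;                              /* 4K IOVA granule */
	uint64_t base_pfn = 1;                                /* keep IOVA page 0 invalid */

	if (!ranges_overlap(dev_map, aperture)) {
		fprintf(stderr, "specified DMA range outside IOMMU capability\n");
		return 1;
	}
	/* ...then make sure the granule-aligned base fits the aperture. */
	if (aperture.min >> order > base_pfn)
		base_pfn = aperture.min >> order;

	printf("base_pfn = %#llx\n", (unsigned long long)base_pfn);
	return 0;
}

Pinning base_pfn at 1 is what keeps IOVA page 0 permanently invalid, as the updated kerneldoc notes.
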
*/ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, - size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot, - unsigned long attrs) + size_t size, struct sg_table *sgt, gfp_t gfp, unsigned long attrs) { struct iommu_domain *domain = iommu_get_dma_domain(dev); struct iommu_dma_cookie *cookie = domain->iova_cookie; @@ -1037,15 +1033,14 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, } static void *iommu_dma_alloc_remap(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, - unsigned long attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct page **pages; struct sg_table sgt; void *vaddr; + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); - pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot, - attrs); + pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, attrs); if (!pages) return NULL; *dma_handle = sgt.sgl->dma_address; @@ -1072,8 +1067,7 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, if (!sh) return NULL; - sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, - PAGE_KERNEL, attrs); + sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs); if (!sh->pages) { kfree(sh); return NULL; @@ -1175,9 +1169,6 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, */ if (dev_use_swiotlb(dev, size, dir) && iova_offset(iovad, phys | size)) { - void *padding_start; - size_t padding_size, aligned_size; - if (!is_swiotlb_active(dev)) { dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n"); return DMA_MAPPING_ERROR; @@ -1185,24 +1176,30 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, trace_swiotlb_bounced(dev, phys, size); - aligned_size = iova_align(iovad, size); - phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size, + phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir, attrs); if (phys == DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR; - /* Cleanup the padding area. */ - padding_start = phys_to_virt(phys); - padding_size = aligned_size; + /* + * Untrusted devices should not see padding areas with random + * leftover kernel data, so zero the pre- and post-padding. + * swiotlb_tbl_map_single() has initialized the bounce buffer + * proper to the contents of the original memory buffer. 
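
The comment above explains why only the alignment padding is cleared for untrusted devices, and the hunk that follows computes the two padding ranges from the IOVA granule. The same arithmetic as a stand-alone sketch, with GRANULE standing in for the iova_domain granule that iova_align()/iova_align_down() operate on:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULE 4096UL	/* stand-in for the iova_domain granule */

static uintptr_t align_down(uintptr_t x) { return x & ~(GRANULE - 1); }
static uintptr_t align_up(uintptr_t x)   { return (x + GRANULE - 1) & ~(GRANULE - 1); }

int main(void)
{
	uintptr_t virt = 0x20a40;	/* bounce-buffer address of the data */
	size_t size = 0x1234;		/* length of the original buffer */

	/* Pre-padding: from the granule boundary up to the data. */
	uintptr_t pre_start = align_down(virt);
	size_t pre_len = virt - pre_start;

	/* Post-padding: from the end of the data up to the next boundary. */
	uintptr_t post_start = virt + size;
	size_t post_len = align_up(post_start) - post_start;

	printf("zero %zu bytes at %#lx and %zu bytes at %#lx\n",
	       pre_len, (unsigned long)pre_start, post_len, (unsigned long)post_start);
	assert(pre_len < GRANULE && post_len < GRANULE);
	return 0;
}
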
+ */ + if (dev_is_untrusted(dev)) { + size_t start, virt = (size_t)phys_to_virt(phys); - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) { - padding_start += size; - padding_size -= size; - } + /* Pre-padding */ + start = iova_align_down(iovad, virt); + memset((void *)start, 0, virt - start); - memset(padding_start, 0, padding_size); + /* Post-padding */ + start = virt + size; + memset((void *)start, 0, + iova_align(iovad, start) - start); + } } if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) @@ -1639,8 +1636,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, if (gfpflags_allow_blocking(gfp) && !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { - return iommu_dma_alloc_remap(dev, size, handle, gfp, - dma_pgprot(dev, PAGE_KERNEL, attrs), attrs); + return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs); } if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && @@ -1740,7 +1736,7 @@ static size_t iommu_dma_max_mapping_size(struct device *dev) return SIZE_MAX; } -static const struct dma_map_ops iommu_dmafops = { +static const struct dma_map_ops iommu_dma_ops = { .flags = DMA_F_PCI_P2PDMA_SUPPORTED, .alloc = iommu_dma_alloc, .free = iommu_dma_free, @@ -1765,25 +1761,20 @@ static const struct dma_map_ops iommu_dmafops = { .max_mapping_size = iommu_dma_max_mapping_size, }; -/* - * The IOMMU core code allocates the default DMA domain, which the underlying - * IOMMU driver needs to support via the dma-iommu layer. - */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) +void iommu_setup_dma_ops(struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (!domain) - goto out_err; + if (dev_is_pci(dev)) + dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; - /* - * The IOMMU core code allocates the default DMA domain, which the - * underlying IOMMU driver needs to support via the dma-iommu layer. 
- */ if (iommu_is_dma_domain(domain)) { - if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev)) + if (iommu_dma_init_domain(domain, dev)) goto out_err; - dev->dma_ops = &iommu_dmafops; + dev->dma_ops = &iommu_dma_ops; + } else if (dev->dma_ops == &iommu_dma_ops) { + /* Clean up if we've switched *from* a DMA domain */ + dev->dma_ops = NULL; } return; @@ -1791,7 +1782,6 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", dev_name(dev)); } -EXPORT_SYMBOL_GPL(iommu_setup_dma_ops); static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, phys_addr_t msi_addr, struct iommu_domain *domain) diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h index c829f1f82a991c63d8901f65f25c73c2af2f1bb2..c12d63457c76466d20afa325b34193f68a435f4b 100644 --- a/drivers/iommu/dma-iommu.h +++ b/drivers/iommu/dma-iommu.h @@ -9,6 +9,8 @@ #ifdef CONFIG_IOMMU_DMA +void iommu_setup_dma_ops(struct device *dev); + int iommu_get_dma_cookie(struct iommu_domain *domain); void iommu_put_dma_cookie(struct iommu_domain *domain); @@ -17,13 +19,13 @@ int iommu_dma_init_fq(struct iommu_domain *domain); void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); extern bool iommu_dma_forcedac; -static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) -{ - dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; -} #else /* CONFIG_IOMMU_DMA */ +static inline void iommu_setup_dma_ops(struct device *dev) +{ +} + static inline int iommu_dma_init_fq(struct iommu_domain *domain) { return -EINVAL; @@ -42,9 +44,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } -static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) -{ -} - #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index fa0d1f19699e8d1d4f8ce54785da81a8826caf48..2e2bd8c1bcf5ff181300b1c59a11e169cc5410bf 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -217,12 +217,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON); int intel_iommu_enabled = 0; EXPORT_SYMBOL_GPL(intel_iommu_enabled); -static int dmar_map_gfx = 1; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; static int iommu_skip_te_disable; +static int disable_igfx_iommu; -#define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 const struct iommu_ops intel_iommu_ops; @@ -261,7 +260,7 @@ static int __init intel_iommu_setup(char *str) no_platform_optin = 1; pr_info("IOMMU disabled\n"); } else if (!strncmp(str, "igfx_off", 8)) { - dmar_map_gfx = 0; + disable_igfx_iommu = 1; pr_info("Disable GFX device mapping\n"); } else if (!strncmp(str, "forcedac", 8)) { pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n"); @@ -867,7 +866,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; if (domain->use_first_level) - pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; + pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; tmp = 0ULL; if (!try_cmpxchg64(&pte->val, &tmp, pteval)) @@ -1372,21 +1371,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info) } } -static void __iommu_flush_dev_iotlb(struct device_domain_info *info, - u64 addr, unsigned int mask) -{ - 
u16 sid, qdep; - - if (!info || !info->ats_enabled) - return; - - sid = info->bus << 8 | info->devfn; - qdep = info->ats_qdep; - qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, - qdep, addr, mask); - quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep); -} - static void intel_flush_iotlb_all(struct iommu_domain *domain) { cache_tag_flush_all(to_dmar_domain(domain)); @@ -1885,7 +1869,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); attr |= DMA_FL_PTE_PRESENT; if (domain->use_first_level) { - attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; + attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; if (prot & DMA_PTE_WRITE) attr |= DMA_FL_PTE_DIRTY; } @@ -1972,7 +1956,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 { struct intel_iommu *iommu = info->iommu; struct context_entry *context; - u16 did_old; spin_lock(&iommu->lock); context = iommu_context_addr(iommu, bus, devfn, 0); @@ -1981,24 +1964,10 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 return; } - did_old = context_domain_id(context); - context_clear_entry(context); __iommu_flush_cache(iommu, context, sizeof(*context)); spin_unlock(&iommu->lock); - iommu->flush.flush_context(iommu, - did_old, - (((u16)bus) << 8) | devfn, - DMA_CCMD_MASK_NOBIT, - DMA_CCMD_DEVICE_INVL); - - iommu->flush.flush_iotlb(iommu, - did_old, - 0, - 0, - DMA_TLB_DSI_FLUSH); - - __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); + intel_context_flush_present(info, context, true); } static int domain_setup_first_level(struct intel_iommu *iommu, @@ -2127,12 +2096,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, if (ret) return ret; - ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID); - if (ret) { - domain_detach_iommu(domain, iommu); - return ret; - } - info->domain = domain; spin_lock_irqsave(&domain->lock, flags); list_add(&info->link, &domain->devices); @@ -2150,15 +2113,21 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, else ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID); - if (ret) { - device_block_translation(dev); - return ret; - } + if (ret) + goto out_block_translation; if (sm_supported(info->iommu) || !domain_type_is_si(info->domain)) iommu_enable_pci_caps(info); + ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID); + if (ret) + goto out_block_translation; + return 0; + +out_block_translation: + device_block_translation(dev); + return ret; } /** @@ -2190,17 +2159,6 @@ static bool device_rmrr_is_relaxable(struct device *dev) return false; } -/* - * Return the required default domain type for a specific device. 
- * - * @dev: the device in query - * @startup: true if this is during early boot - * - * Returns: - * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain - * - IOMMU_DOMAIN_IDENTITY: device requires an identical mapping domain - * - 0: both identity and dynamic domains work for this device - */ static int device_def_domain_type(struct device *dev) { if (dev_is_pci(dev)) { @@ -2208,9 +2166,6 @@ static int device_def_domain_type(struct device *dev) if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return IOMMU_DOMAIN_IDENTITY; - - if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) - return IOMMU_DOMAIN_IDENTITY; } return 0; @@ -2511,9 +2466,6 @@ static int __init init_dmars(void) iommu_set_root_entry(iommu); } - if (!dmar_map_gfx) - iommu_identity_mapping |= IDENTMAP_GFX; - check_tylersburg_isoch(); ret = si_domain_init(hw_pass_through); @@ -2604,7 +2556,7 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. Either bypass it or set the gfx_mapped flag, as appropriate */ drhd->gfx_dedicated = 1; - if (!dmar_map_gfx) + if (disable_igfx_iommu) drhd->ignored = 1; } } @@ -3713,6 +3665,79 @@ static struct iommu_domain blocking_domain = { } }; +static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage) +{ + if (!intel_iommu_superpage) + return 0; + + if (first_stage) + return cap_fl1gp_support(iommu->cap) ? 2 : 1; + + return fls(cap_super_page_val(iommu->cap)); +} + +static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + struct dmar_domain *domain; + int addr_width; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&domain->devices); + INIT_LIST_HEAD(&domain->dev_pasids); + INIT_LIST_HEAD(&domain->cache_tags); + spin_lock_init(&domain->lock); + spin_lock_init(&domain->cache_lock); + xa_init(&domain->iommu_array); + + domain->nid = dev_to_node(dev); + domain->has_iotlb_device = info->ats_enabled; + domain->use_first_level = first_stage; + + /* calculate the address width */ + addr_width = agaw_to_width(iommu->agaw); + if (addr_width > cap_mgaw(iommu->cap)) + addr_width = cap_mgaw(iommu->cap); + domain->gaw = addr_width; + domain->agaw = iommu->agaw; + domain->max_addr = __DOMAIN_MAX_ADDR(addr_width); + + /* iommu memory access coherency */ + domain->iommu_coherency = iommu_paging_structure_coherency(iommu); + + /* pagesize bitmap */ + domain->domain.pgsize_bitmap = SZ_4K; + domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage); + domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); + + /* + * IOVA aperture: First-level translation restricts the input-address + * to a canonical address (i.e., address bits 63:N have the same value + * as address bit [N-1], where N is 48-bits with 4-level paging and + * 57-bits with 5-level paging). Hence, skip bit [N-1]. 
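
The comment above motivates the asymmetric aperture set up just below: with first-stage translation, bit N-1 acts as the sign-extension bit, so only the low canonical half of the gaw-sized space can be handed out. A small sketch of the two aperture_end values, assuming the usual __DOMAIN_MAX_ADDR(gaw) = (1 << gaw) - 1 shape of the driver macro:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver's __DOMAIN_MAX_ADDR(gaw) (assumed here). */
#define DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	int gaw = 48;	/* 4-level paging */

	/* Second stage: the full input-address width is usable. */
	printf("second stage aperture_end = 0x%" PRIx64 "\n", DOMAIN_MAX_ADDR(gaw));

	/* First stage: bit gaw-1 is the sign-extension bit, so skip it. */
	printf("first  stage aperture_end = 0x%" PRIx64 "\n", DOMAIN_MAX_ADDR(gaw - 1));
	return 0;
}
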
+ */ + domain->domain.geometry.force_aperture = true; + domain->domain.geometry.aperture_start = 0; + if (first_stage) + domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); + else + domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); + + /* always allocate the top pgd */ + domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL); + if (!domain->pgd) { + kfree(domain); + return ERR_PTR(-ENOMEM); + } + domain_flush_cache(domain, domain->pgd, PAGE_SIZE); + + return domain; +} + static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; @@ -3775,15 +3800,14 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags, if (user_data || (dirty_tracking && !ssads_supported(iommu))) return ERR_PTR(-EOPNOTSUPP); - /* - * domain_alloc_user op needs to fully initialize a domain before - * return, so uses iommu_domain_alloc() here for simple. - */ - domain = iommu_domain_alloc(dev->bus); - if (!domain) - return ERR_PTR(-ENOMEM); - - dmar_domain = to_dmar_domain(domain); + /* Do not use first stage for user domain translation. */ + dmar_domain = paging_domain_alloc(dev, false); + if (IS_ERR(dmar_domain)) + return ERR_CAST(dmar_domain); + domain = &dmar_domain->domain; + domain->type = IOMMU_DOMAIN_UNMANAGED; + domain->owner = &intel_iommu_ops; + domain->ops = intel_iommu_ops.default_domain_ops; if (nested_parent) { dmar_domain->nested_parent = true; @@ -4192,15 +4216,6 @@ static void intel_iommu_release_device(struct device *dev) set_dma_ops(dev, NULL); } -static void intel_iommu_probe_finalize(struct device *dev) -{ - set_dma_ops(dev, NULL); - iommu_setup_dma_ops(dev, 0, U64_MAX); - - if (is_zhaoxin_kh40000) - kh40000_set_iommu_dma_ops(dev); -} - static void intel_iommu_get_resv_regions(struct device *device, struct list_head *head) { @@ -4302,6 +4317,37 @@ static int intel_iommu_enable_sva(struct device *dev) return 0; } +static int context_flip_pri(struct device_domain_info *info, bool enable) +{ + struct intel_iommu *iommu = info->iommu; + u8 bus = info->bus, devfn = info->devfn; + struct context_entry *context; + + spin_lock(&iommu->lock); + if (context_copied(iommu, bus, devfn)) { + spin_unlock(&iommu->lock); + return -EINVAL; + } + + context = iommu_context_addr(iommu, bus, devfn, false); + if (!context || !context_present(context)) { + spin_unlock(&iommu->lock); + return -ENODEV; + } + + if (enable) + context_set_sm_pre(context); + else + context_clear_sm_pre(context); + + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(context, sizeof(*context)); + intel_context_flush_present(info, context, true); + spin_unlock(&iommu->lock); + + return 0; +} + static int intel_iommu_enable_iopf(struct device *dev) { struct pci_dev *pdev = dev_is_pci(dev) ? 
to_pci_dev(dev) : NULL; @@ -4331,15 +4377,23 @@ static int intel_iommu_enable_iopf(struct device *dev) if (ret) return ret; + ret = context_flip_pri(info, true); + if (ret) + goto err_remove_device; + ret = pci_enable_pri(pdev, PRQ_DEPTH); - if (ret) { - iopf_queue_remove_device(iommu->iopf_queue, dev); - return ret; - } + if (ret) + goto err_clear_pri; info->pri_enabled = 1; return 0; +err_clear_pri: + context_flip_pri(info, false); +err_remove_device: + iopf_queue_remove_device(iommu->iopf_queue, dev); + + return ret; } static int intel_iommu_disable_iopf(struct device *dev) @@ -4350,6 +4404,15 @@ static int intel_iommu_disable_iopf(struct device *dev) if (!info->pri_enabled) return -EINVAL; + /* Disable new PRI reception: */ + context_flip_pri(info, false); + + /* + * Remove device from fault queue and acknowledge all outstanding + * PRQs to the device: + */ + iopf_queue_remove_device(iommu->iopf_queue, dev); + /* * PCIe spec states that by clearing PRI enable bit, the Page * Request Interface will not issue new page requests, but has @@ -4360,7 +4423,6 @@ static int intel_iommu_disable_iopf(struct device *dev) */ pci_disable_pri(to_pci_dev(dev)); info->pri_enabled = 0; - iopf_queue_remove_device(iommu->iopf_queue, dev); return 0; } @@ -4671,7 +4733,6 @@ const struct iommu_ops intel_iommu_ops = { .domain_alloc_user = intel_iommu_domain_alloc_user, .domain_alloc_sva = intel_svm_domain_alloc, .probe_device = intel_iommu_probe_device, - .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .device_group = intel_iommu_device_group, @@ -4704,7 +4765,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev) return; pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); - dmar_map_gfx = 0; + disable_igfx_iommu = 1; } /* G4x/GM45 integrated gfx dmar support is totally busted. */ @@ -4788,8 +4849,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) if (!(ggc & GGC_MEMORY_VT_ENABLED)) { pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); - dmar_map_gfx = 0; - } else if (dmar_map_gfx) { + disable_igfx_iommu = 1; + } else if (!disable_igfx_iommu) { /* we have to ensure the gfx device is idle before we flush */ pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n"); iommu_set_dma_strict(); diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 83d394f166a7490dc16ce98baa1274a23742af20..2086688743f09cbf4f1e1a537328279bb5a2db96 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -49,7 +49,6 @@ #define DMA_FL_PTE_US BIT_ULL(2) #define DMA_FL_PTE_ACCESS BIT_ULL(5) #define DMA_FL_PTE_DIRTY BIT_ULL(6) -#define DMA_FL_PTE_XD BIT_ULL(63) #define DMA_SL_PTE_DIRTY_BIT 9 #define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) @@ -831,11 +830,10 @@ static inline void dma_clear_pte(struct dma_pte *pte) static inline u64 dma_pte_addr(struct dma_pte *pte) { #ifdef CONFIG_64BIT - return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD); + return pte->val & VTD_PAGE_MASK; #else /* Must have a full atomic 64-bit read */ - return __cmpxchg64(&pte->val, 0ULL, 0ULL) & - VTD_PAGE_MASK & (~DMA_FL_PTE_XD); + return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; #endif } @@ -1047,6 +1045,15 @@ static inline void context_set_sm_pre(struct context_entry *context) context->lo |= BIT_ULL(4); } +/* + * Clear the PRE(Page Request Enable) field of a scalable mode context + * entry. 
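
The helper documented above, together with the existing context_set_sm_pre(), only toggles bit 4 of the context entry's low quadword; context_flip_pri() earlier in this patch wraps that toggle in the iommu->lock, a conditional clflush and the context-cache flush. The bit manipulation in isolation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Low quadword of a scalable-mode context entry; PRE is bit 4. */
#define CTX_SM_PRE (1ULL << 4)

static void set_pre(uint64_t *lo)   { *lo |= CTX_SM_PRE; }
static void clear_pre(uint64_t *lo) { *lo &= ~CTX_SM_PRE; }

int main(void)
{
	uint64_t lo = 0x1;	/* present context entry, PRE clear */

	set_pre(&lo);
	printf("enable PRI:  lo = 0x%" PRIx64 "\n", lo);	/* 0x11 */
	clear_pre(&lo);
	printf("disable PRI: lo = 0x%" PRIx64 "\n", lo);	/* 0x1 */
	return 0;
}
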
+ */ +static inline void context_clear_sm_pre(struct context_entry *context) +{ + context->lo &= ~BIT_ULL(4); +} + /* Returns a number of VTD pages, but aligned to MM page size */ static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size) { @@ -1145,6 +1152,10 @@ void cache_tag_flush_all(struct dmar_domain *domain); void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, unsigned long end); +void intel_context_flush_present(struct device_domain_info *info, + struct context_entry *context, + bool affect_domains); + #ifdef CONFIG_INTEL_IOMMU_SVM void intel_svm_check(struct intel_iommu *iommu); int intel_svm_enable_prq(struct intel_iommu *iommu); diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index f4ddd4d9a825fe604d977f8eb4788ff4ac8a671b..e59403767faea1d9360ca2d311c8bbb5369e2588 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "iommu.h" #include "../irq_remapping.h" @@ -50,6 +51,7 @@ struct irq_2_iommu { u16 sub_handle; u8 irte_mask; enum irq_mode mode; + bool posted_msi; }; struct intel_ir_data { @@ -599,8 +601,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) if (ir_pre_enabled(iommu)) { if (!is_kdump_kernel()) { - pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", - iommu->name); + pr_info_once("IRQ remapping was enabled on %s but we are not in kdump mode\n", + iommu->name); clear_ir_pre_enabled(iommu); iommu_disable_irq_remapping(iommu); } else if (iommu_load_old_irte(iommu)) @@ -1118,6 +1120,14 @@ static void prepare_irte(struct irte *irte, int vector, unsigned int dest) irte->redir_hint = 1; } +static void prepare_irte_posted(struct irte *irte) +{ + memset(irte, 0, sizeof(*irte)); + + irte->present = 1; + irte->p_pst = 1; +} + struct irq_remap_ops intel_irq_remap_ops = { .prepare = intel_prepare_irq_remapping, .enable = intel_enable_irq_remapping, @@ -1126,6 +1136,47 @@ struct irq_remap_ops intel_irq_remap_ops = { .enable_faulting = enable_drhd_fault_handling, }; +#ifdef CONFIG_X86_POSTED_MSI + +static phys_addr_t get_pi_desc_addr(struct irq_data *irqd) +{ + int cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd)); + + if (WARN_ON(cpu >= nr_cpu_ids)) + return 0; + + return __pa(per_cpu_ptr(&posted_msi_pi_desc, cpu)); +} + +static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) +{ + struct intel_ir_data *ir_data = irqd->chip_data; + struct irte *irte = &ir_data->irte_entry; + struct irte irte_pi; + u64 pid_addr; + + pid_addr = get_pi_desc_addr(irqd); + + if (!pid_addr) { + pr_warn("Failed to setup IRQ %d for posted mode", irqd->irq); + return; + } + + memset(&irte_pi, 0, sizeof(irte_pi)); + + /* The shared IRTE already be set up as posted during alloc_irte */ + dmar_copy_shared_irte(&irte_pi, irte); + + irte_pi.pda_l = (pid_addr >> (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT); + irte_pi.pda_h = (pid_addr >> 32) & ~(-1UL << PDA_HIGH_BIT); + + modify_irte(&ir_data->irq_2_iommu, &irte_pi); +} + +#else +static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {} +#endif + static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) { struct intel_ir_data *ir_data = irqd->chip_data; @@ -1139,8 +1190,9 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) irte->vector = cfg->vector; irte->dest_id = IRTE_DEST(cfg->dest_apicid); - /* Update the hardware only if the interrupt is in remapped 
mode. */ - if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) + if (ir_data->irq_2_iommu.posted_msi) + intel_ir_reconfigure_irte_posted(irqd); + else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) modify_irte(&ir_data->irq_2_iommu, irte); } @@ -1194,7 +1246,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info) struct intel_ir_data *ir_data = data->chip_data; struct vcpu_data *vcpu_pi_info = info; - /* stop posting interrupts, back to remapping mode */ + /* stop posting interrupts, back to the default mode */ if (!vcpu_pi_info) { modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry); } else { @@ -1233,6 +1285,49 @@ static struct irq_chip intel_ir_chip = { .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, }; +/* + * With posted MSIs, all vectors are multiplexed into a single notification + * vector. Devices MSIs are then dispatched in a demux loop where + * EOIs can be coalesced as well. + * + * "INTEL-IR-POST" IRQ chip does not do EOI on ACK, thus the dummy irq_ack() + * function. Instead EOI is performed by the posted interrupt notification + * handler. + * + * For the example below, 3 MSIs are coalesced into one CPU notification. Only + * one apic_eoi() is needed. + * + * __sysvec_posted_msi_notification() + * irq_enter(); + * handle_edge_irq() + * irq_chip_ack_parent() + * dummy(); // No EOI + * handle_irq_event() + * driver_handler() + * handle_edge_irq() + * irq_chip_ack_parent() + * dummy(); // No EOI + * handle_irq_event() + * driver_handler() + * handle_edge_irq() + * irq_chip_ack_parent() + * dummy(); // No EOI + * handle_irq_event() + * driver_handler() + * apic_eoi() + * irq_exit() + */ + +static void dummy_ack(struct irq_data *d) { } + +static struct irq_chip intel_ir_chip_post_msi = { + .name = "INTEL-IR-POST", + .irq_ack = dummy_ack, + .irq_set_affinity = intel_ir_set_affinity, + .irq_compose_msi_msg = intel_ir_compose_msi_msg, + .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, +}; + static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle) { memset(msg, 0, sizeof(*msg)); @@ -1274,6 +1369,11 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data, break; case X86_IRQ_ALLOC_TYPE_PCI_MSI: case X86_IRQ_ALLOC_TYPE_PCI_MSIX: + if (posted_msi_supported()) { + prepare_irte_posted(irte); + data->irq_2_iommu.posted_msi = 1; + } + set_msi_sid(irte, pci_real_dma_dev(msi_desc_to_pci_dev(info->desc))); break; @@ -1361,7 +1461,12 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, irq_data->hwirq = (index << 16) + i; irq_data->chip_data = ird; - irq_data->chip = &intel_ir_chip; + if (posted_msi_supported() && + ((info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) || + (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX))) + irq_data->chip = &intel_ir_chip_post_msi; + else + irq_data->chip = &intel_ir_chip; intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i); irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT); } diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index abce19e2ad6f4e55d7b7b662a4dda5ebef6cef29..5792c817cefa56872cf3434368e967a5a62e6dd9 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -146,6 +146,8 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid) retry: entries = get_pasid_table_from_pde(&dir[dir_index]); if (!entries) { + u64 tmp; + entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC); if (!entries) return NULL; @@ -156,8 +158,9 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, 
u32 pasid) * clear. However, this entry might be populated by others * while we are preparing it. Use theirs with a retry. */ - if (cmpxchg64(&dir[dir_index].val, 0ULL, - (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) { + tmp = 0ULL; + if (!try_cmpxchg64(&dir[dir_index].val, &tmp, + (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) { iommu_free_page(entries); goto retry; } @@ -333,7 +336,6 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, pasid_set_domain_id(pte, did); pasid_set_address_width(pte, iommu->agaw); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - pasid_set_nxe(pte); /* Setup Present and PASID Granular Transfer Type: */ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY); @@ -692,25 +694,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn) context_clear_entry(context); __iommu_flush_cache(iommu, context, sizeof(*context)); spin_unlock(&iommu->lock); - - /* - * Cache invalidation for changes to a scalable-mode context table - * entry. - * - * Section 6.5.3.3 of the VT-d spec: - * - Device-selective context-cache invalidation; - * - Domain-selective PASID-cache invalidation to affected domains - * (can be skipped if all PASID entries were not-present); - * - Domain-selective IOTLB invalidation to affected domains; - * - Global Device-TLB invalidation to affected functions. - * - * The iommu has been parked in the blocking state. All domains have - * been detached from the device or PASID. The PASID and IOTLB caches - * have been invalidated during the domain detach path. - */ - iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn), - DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); - devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID); + intel_context_flush_present(info, context, false); } static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data) @@ -768,8 +752,6 @@ static int context_entry_set_pasid_table(struct context_entry *context, if (info->ats_supported) context_set_sm_dte(context); - if (info->pri_supported) - context_set_sm_pre(context); if (info->pasid_supported) context_set_pasid(context); @@ -872,3 +854,89 @@ int intel_pasid_setup_sm_context(struct device *dev) return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev); } + +/* + * Global Device-TLB invalidation following changes in a context entry which + * was present. + */ +static void __context_flush_dev_iotlb(struct device_domain_info *info) +{ + if (!info->ats_enabled) + return; + + qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn), + info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH); + + /* + * There is no guarantee that the device DMA is stopped when it reaches + * here. Therefore, always attempt the extra device TLB invalidation + * quirk. The impact on performance is acceptable since this is not a + * performance-critical path. + */ + quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID, + info->ats_qdep); +} + +/* + * Cache invalidations after change in a context table entry that was present + * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If + * IOMMU is in scalable mode and all PASID table entries of the device were + * non-present, set flush_domains to false. Otherwise, true. 
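
A few hunks up, intel_pasid_get_entry() switched from cmpxchg64() to try_cmpxchg64() for publishing a freshly allocated PASID directory page: if another CPU installs an entry first, the local page is freed and the winner's table is adopted on retry. The same lock-free install-or-adopt pattern in portable C11 atomics, with a heap allocation standing in for the IOMMU page (illustrative only):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uint64_t slot;	/* stand-in for dir[dir_index].val */

static uint64_t get_or_install(void)
{
	for (;;) {
		uint64_t cur = atomic_load(&slot);

		if (cur)	/* already populated, use theirs */
			return cur;

		uint64_t mine = (uint64_t)(uintptr_t)calloc(1, 64) | 1; /* fake "present" bit */
		uint64_t expected = 0;

		/* Publish ours unless somebody else beat us to it. */
		if (atomic_compare_exchange_strong(&slot, &expected, mine))
			return mine;

		free((void *)(uintptr_t)(mine & ~1ULL));	/* lost the race, retry */
	}
}

int main(void)
{
	printf("entry = %#llx\n", (unsigned long long)get_or_install());
	printf("entry = %#llx\n", (unsigned long long)get_or_install());
	return 0;
}

try_cmpxchg64() returns a boolean and writes the observed value back into tmp, which reads more naturally in a retry loop than comparing the cmpxchg64() return value.
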
+ */ +void intel_context_flush_present(struct device_domain_info *info, + struct context_entry *context, + bool flush_domains) +{ + struct intel_iommu *iommu = info->iommu; + u16 did = context_domain_id(context); + struct pasid_entry *pte; + int i; + + /* + * Device-selective context-cache invalidation. The Domain-ID field + * of the Context-cache Invalidate Descriptor is ignored by hardware + * when operating in scalable mode. Therefore the @did value doesn't + * matter in scalable mode. + */ + iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn), + DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); + + /* + * For legacy mode: + * - Domain-selective IOTLB invalidation + * - Global Device-TLB invalidation to all affected functions + */ + if (!sm_supported(iommu)) { + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + __context_flush_dev_iotlb(info); + + return; + } + + /* + * For scalable mode: + * - Domain-selective PASID-cache invalidation to affected domains + * - Domain-selective IOTLB invalidation to affected domains + * - Global Device-TLB invalidation to affected functions + */ + if (flush_domains) { + /* + * If the IOMMU is running in scalable mode and there might + * be potential PASID translations, the caller should hold + * the lock to ensure that context changes and cache flushes + * are atomic. + */ + assert_spin_locked(&iommu->lock); + for (i = 0; i < info->pasid_table->max_pasid; i++) { + pte = intel_pasid_get_entry(info->dev, i); + if (!pte || !pasid_pte_is_present(pte)) + continue; + + did = pasid_get_domain_id(pte); + qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0); + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + } + } + + __context_flush_dev_iotlb(info); +} diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index da9978fef7ac5f6287e7fb3049046516ad4c3aab..dde6d3ba5ae0fce449a694120a77f56deb0b6424 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -247,16 +247,6 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value) pasid_set_bits(&pe->val[1], 1 << 23, value << 23); } -/* - * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID - * entry. It is required when XD bit of the first level page table - * entry is about to be set. - */ -static inline void pasid_set_nxe(struct pasid_entry *pe) -{ - pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5); -} - /* * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode * PASID entry. diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c index 06d78fcc79fdb6818af10d37b3a23f8cb0ae6b31..cd679c13752e00c30766fd060ff77b8017934645 100644 --- a/drivers/iommu/io-pgfault.c +++ b/drivers/iommu/io-pgfault.c @@ -59,30 +59,6 @@ void iopf_free_group(struct iopf_group *group) } EXPORT_SYMBOL_GPL(iopf_free_group); -static struct iommu_domain *get_domain_for_iopf(struct device *dev, - struct iommu_fault *fault) -{ - struct iommu_domain *domain; - - if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { - domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0); - if (IS_ERR(domain)) - domain = NULL; - } else { - domain = iommu_get_domain_for_dev(dev); - } - - if (!domain || !domain->iopf_handler) { - dev_warn_ratelimited(dev, - "iopf (pasid %d) without domain attached or handler installed\n", - fault->prm.pasid); - - return NULL; - } - - return domain; -} - /* Non-last request of a group. Postpone until the last one. 
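
report_partial_fault() below still queues non-last page requests until the last request of the group arrives; only the dispatch target changed, from a looked-up domain to the attach handle. A toy model of that postpone-until-last grouping, with a plain boolean standing in for the hardware's last-request marker:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PARTIALS 16

struct fault { unsigned long addr; bool last; };

static struct fault pending[MAX_PARTIALS];
static int npending;

/* Queue partial requests; dispatch the whole group on the last one. */
static void report_fault(struct fault f)
{
	pending[npending++] = f;
	if (!f.last)
		return;		/* postpone until the last request arrives */

	printf("dispatching group of %d fault(s)\n", npending);
	for (int i = 0; i < npending; i++)
		printf("  handle iova %#lx\n", pending[i].addr);
	npending = 0;		/* one response covers the whole group */
}

int main(void)
{
	report_fault((struct fault){ 0x1000, false });
	report_fault((struct fault){ 0x2000, false });
	report_fault((struct fault){ 0x3000, true });
	return 0;
}
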
*/ static int report_partial_fault(struct iommu_fault_param *fault_param, struct iommu_fault *fault) @@ -134,6 +110,8 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param, list_add(&group->pending_node, &iopf_param->faults); mutex_unlock(&iopf_param->lock); + group->fault_count = list_count_nodes(&group->faults); + return group; } @@ -206,20 +184,51 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) if (group == &abort_group) goto err_abort; - group->domain = get_domain_for_iopf(dev, fault); - if (!group->domain) + if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { + group->attach_handle = iommu_attach_handle_get(dev->iommu_group, + fault->prm.pasid, + 0); + if (IS_ERR(group->attach_handle)) { + const struct iommu_ops *ops = dev_iommu_ops(dev); + + if (!ops->user_pasid_table) + goto err_abort; + + /* + * The iommu driver for this device supports user- + * managed PASID table. Therefore page faults for + * any PASID should go through the NESTING domain + * attached to the device RID. + */ + group->attach_handle = + iommu_attach_handle_get(dev->iommu_group, + IOMMU_NO_PASID, + IOMMU_DOMAIN_NESTED); + if (IS_ERR(group->attach_handle)) + goto err_abort; + } + } else { + group->attach_handle = + iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0); + if (IS_ERR(group->attach_handle)) + goto err_abort; + } + + if (!group->attach_handle->domain->iopf_handler) goto err_abort; /* * On success iopf_handler must call iopf_group_response() and * iopf_free_group() */ - if (group->domain->iopf_handler(group)) + if (group->attach_handle->domain->iopf_handler(group)) goto err_abort; return; err_abort: + dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n", + fault->prm.pasid); iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE); if (group == &abort_group) __iopf_free_group(group); diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 38ebd192e5dbc9459010eeab013497be1bb1dd91..fd9a5c136b5d758631d67c527426164aa21447a1 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -76,6 +76,7 @@ #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) +#define ARM_LPAE_PTE_DBM (((arm_lpae_iopte)1) << 51) #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) #define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) #define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) @@ -85,7 +86,7 @@ #define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) /* Ignore the contiguous bit for block splitting */ -#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) +#define ARM_LPAE_PTE_ATTR_HI_MASK (ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM) #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ ARM_LPAE_PTE_ATTR_HI_MASK) /* Software bit for solving coherency races */ @@ -93,7 +94,11 @@ /* Stage-1 PTE */ #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) -#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) +#define ARM_LPAE_PTE_AP_RDONLY_BIT 7 +#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)1) << \ + ARM_LPAE_PTE_AP_RDONLY_BIT) +#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK (ARM_LPAE_PTE_AP_RDONLY | \ + ARM_LPAE_PTE_DBM) #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) @@ -139,6 +144,12 @@ #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) +#define iopte_writeable_dirty(pte) \ + (((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM) + +#define iopte_set_writeable_clean(ptep) \ + 
set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep)) + struct arm_lpae_io_pgtable { struct io_pgtable iop; @@ -160,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl, return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK; } +static inline bool iopte_table(arm_lpae_iopte pte, int lvl) +{ + if (lvl == (ARM_LPAE_MAX_LEVELS - 1)) + return false; + return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE; +} + static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr, struct arm_lpae_io_pgtable *data) { @@ -434,6 +452,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, pte = ARM_LPAE_PTE_nG; if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) pte |= ARM_LPAE_PTE_AP_RDONLY; + else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD) + pte |= ARM_LPAE_PTE_DBM; if (!(prot & IOMMU_PRIV)) pte |= ARM_LPAE_PTE_AP_UNPRIV; } else { @@ -737,6 +757,97 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, return iopte_to_paddr(pte, data) | iova; } +struct io_pgtable_walk_data { + struct iommu_dirty_bitmap *dirty; + unsigned long flags; + u64 addr; + const u64 end; +}; + +static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data, + struct io_pgtable_walk_data *walk_data, + arm_lpae_iopte *ptep, + int lvl); + +static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data, + struct io_pgtable_walk_data *walk_data, + arm_lpae_iopte *ptep, int lvl) +{ + struct io_pgtable *iop = &data->iop; + arm_lpae_iopte pte = READ_ONCE(*ptep); + + if (iopte_leaf(pte, lvl, iop->fmt)) { + size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data); + + if (iopte_writeable_dirty(pte)) { + iommu_dirty_bitmap_record(walk_data->dirty, + walk_data->addr, size); + if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR)) + iopte_set_writeable_clean(ptep); + } + walk_data->addr += size; + return 0; + } + + if (WARN_ON(!iopte_table(pte, lvl))) + return -EINVAL; + + ptep = iopte_deref(pte, data); + return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1); +} + +static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data, + struct io_pgtable_walk_data *walk_data, + arm_lpae_iopte *ptep, + int lvl) +{ + u32 idx; + int max_entries, ret; + + if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) + return -EINVAL; + + if (lvl == data->start_level) + max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte); + else + max_entries = ARM_LPAE_PTES_PER_TABLE(data); + + for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data); + (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) { + ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl); + if (ret) + return ret; + } + + return 0; +} + +static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct io_pgtable_walk_data walk_data = { + .dirty = dirty, + .flags = flags, + .addr = iova, + .end = iova + size, + }; + arm_lpae_iopte *ptep = data->pgd; + int lvl = data->start_level; + + if (WARN_ON(!size)) + return -EINVAL; + if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1))) + return -EINVAL; + if (data->iop.fmt != ARM_64_LPAE_S1) + return -EINVAL; + + return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl); +} + static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) { unsigned long granule, page_sizes; @@ -815,6 +926,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) 
.map_pages = arm_lpae_map_pages, .unmap_pages = arm_lpae_unmap_pages, .iova_to_phys = arm_lpae_iova_to_phys, + .read_and_clear_dirty = arm_lpae_read_and_clear_dirty, }; return data; @@ -830,7 +942,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_ARM_TTBR1 | - IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)) + IO_PGTABLE_QUIRK_ARM_OUTER_WBWA | + IO_PGTABLE_QUIRK_ARM_HD)) return NULL; data = arm_lpae_alloc_pgtable(cfg); diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h index 5f731d994803c6bb644ad8b078da026be1c51f28..de5b54eaa8bf1ab607fb4a5aaf4393a66bb5cfca 100644 --- a/drivers/iommu/iommu-priv.h +++ b/drivers/iommu/iommu-priv.h @@ -17,6 +17,13 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) return dev->iommu->iommu_dev->ops; } +const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode); + +static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec) +{ + return iommu_ops_from_fwnode(fwspec ? fwspec->iommu_fwnode : NULL); +} + int iommu_group_replace_domain(struct iommu_group *group, struct iommu_domain *new_domain); @@ -28,4 +35,15 @@ void iommu_device_unregister_bus(struct iommu_device *iommu, const struct bus_type *bus, struct notifier_block *nb); +struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group, + ioasid_t pasid, + unsigned int type); +int iommu_attach_group_handle(struct iommu_domain *domain, + struct iommu_group *group, + struct iommu_attach_handle *handle); +void iommu_detach_group_handle(struct iommu_domain *domain, + struct iommu_group *group); +int iommu_replace_group_handle(struct iommu_group *group, + struct iommu_domain *new_domain, + struct iommu_attach_handle *handle); #endif /* __LINUX_IOMMU_PRIV_H */ diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index 18a35e798b729c6cf56b1bd1de83cc40eff438f9..503c5d23c1ea27306a8e9fea5e476037fa4064d3 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -10,6 +10,8 @@ #include "iommu-priv.h" static DEFINE_MUTEX(iommu_sva_lock); +static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm); /* Allocate a PASID for the mm within range (inclusive) */ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev) @@ -41,7 +43,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de } iommu_mm->pasid = pasid; INIT_LIST_HEAD(&iommu_mm->sva_domains); - INIT_LIST_HEAD(&iommu_mm->sva_handles); /* * Make sure the write to mm->iommu_mm is not reordered in front of * initialization to iommu_mm fields. If it does, readers may see a @@ -69,11 +70,16 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de */ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) { + struct iommu_group *group = dev->iommu_group; + struct iommu_attach_handle *attach_handle; struct iommu_mm_data *iommu_mm; struct iommu_domain *domain; struct iommu_sva *handle; int ret; + if (!group) + return ERR_PTR(-ENODEV); + mutex_lock(&iommu_sva_lock); /* Allocate mm->pasid if necessary. 
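
In the bind path that follows, the per-device SVA bond is recovered from the generic attach handle with container_of(), since struct iommu_sva now embeds a struct iommu_attach_handle rather than carrying its own domain pointer. The embed-and-recover pattern on its own, with simplified stand-in structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified shapes of the kernel structures involved. */
struct attach_handle { void *domain; };

struct sva_bond {
	int users;
	struct attach_handle handle;	/* embedded, stored in the pasid xarray */
};

int main(void)
{
	struct sva_bond bond = { .users = 1 };
	struct attach_handle *h = &bond.handle;	/* what the core hands back */

	/* Recover the SVA bond from the generic handle. */
	struct sva_bond *b = container_of(h, struct sva_bond, handle);

	printf("same object: %s, users=%d\n", b == &bond ? "yes" : "no", b->users);
	return 0;
}
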
*/ @@ -83,12 +89,22 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm goto out_unlock; } - list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) { - if (handle->dev == dev) { - refcount_inc(&handle->users); - mutex_unlock(&iommu_sva_lock); - return handle; + /* A bond already exists, just take a reference`. */ + attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA); + if (!IS_ERR(attach_handle)) { + handle = container_of(attach_handle, struct iommu_sva, handle); + if (attach_handle->domain->mm != mm) { + ret = -EBUSY; + goto out_unlock; } + refcount_inc(&handle->users); + mutex_unlock(&iommu_sva_lock); + return handle; + } + + if (PTR_ERR(attach_handle) != -ENOENT) { + ret = PTR_ERR(attach_handle); + goto out_unlock; } handle = kzalloc(sizeof(*handle), GFP_KERNEL); @@ -99,7 +115,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm /* Search for an existing domain. */ list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) { - ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid); + ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid, + &handle->handle); if (!ret) { domain->users++; goto out; @@ -113,7 +130,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm goto out_free_handle; } - ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid); + ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid, + &handle->handle); if (ret) goto out_free_domain; domain->users = 1; @@ -121,10 +139,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm out: refcount_set(&handle->users, 1); - list_add(&handle->handle_item, &mm->iommu_mm->sva_handles); mutex_unlock(&iommu_sva_lock); handle->dev = dev; - handle->domain = domain; return handle; out_free_domain: @@ -147,7 +163,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device); */ void iommu_sva_unbind_device(struct iommu_sva *handle) { - struct iommu_domain *domain = handle->domain; + struct iommu_domain *domain = handle->handle.domain; struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm; struct device *dev = handle->dev; @@ -156,7 +172,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle) mutex_unlock(&iommu_sva_lock); return; } - list_del(&handle->handle_item); iommu_detach_device_pasid(domain, dev, iommu_mm->pasid); if (--domain->users == 0) { @@ -170,7 +185,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); u32 iommu_sva_get_pasid(struct iommu_sva *handle) { - struct iommu_domain *domain = handle->domain; + struct iommu_domain *domain = handle->handle.domain; return mm_get_enqcmd_pasid(domain->mm); } @@ -259,7 +274,8 @@ static void iommu_sva_handle_iopf(struct work_struct *work) if (status != IOMMU_PAGE_RESP_SUCCESS) break; - status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm); + status = iommu_sva_handle_mm(&iopf->fault, + group->attach_handle->domain->mm); } iopf_group_response(group, status); @@ -277,8 +293,8 @@ static int iommu_sva_iopf_handler(struct iopf_group *group) return 0; } -struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, - struct mm_struct *mm) +static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm) { const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9265670edf0bf2809636b99af6f81172edbd9051..6b0f42ea9218a2a738552fd5aeaf07ebcecd9838 100644 --- a/drivers/iommu/iommu.c +++ 
b/drivers/iommu/iommu.c @@ -510,7 +510,6 @@ DEFINE_MUTEX(iommu_probe_device_lock); static int __iommu_probe_device(struct device *dev, struct list_head *group_list) { const struct iommu_ops *ops; - struct iommu_fwspec *fwspec; struct iommu_group *group; struct group_device *gdev; int ret; @@ -523,12 +522,7 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list * be present, and that any of their registered instances has suitable * ops for probing, and thus cheekily co-opt the same mechanism. */ - fwspec = dev_iommu_fwspec_get(dev); - if (fwspec && fwspec->ops) - ops = fwspec->ops; - else - ops = iommu_ops_from_fwnode(NULL); - + ops = iommu_fwspec_ops(dev_iommu_fwspec_get(dev)); if (!ops) return -ENODEV; /* @@ -581,10 +575,11 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list if (list_empty(&group->entry)) list_add_tail(&group->entry, group_list); } - mutex_unlock(&group->mutex); - if (dev_is_pci(dev)) - iommu_dma_set_pci_32bit_workaround(dev); + if (group->default_domain) + iommu_setup_dma_ops(dev); + + mutex_unlock(&group->mutex); return 0; @@ -1839,6 +1834,8 @@ int bus_iommu_probe(const struct bus_type *bus) mutex_unlock(&group->mutex); return ret; } + for_each_group_device(group, gdev) + iommu_setup_dma_ops(gdev->dev); mutex_unlock(&group->mutex); /* @@ -2024,6 +2021,10 @@ static int __iommu_domain_alloc_dev(struct device *dev, void *data) return 0; } +/* + * The iommu ops in bus has been retired. Do not use this interface in + * new drivers. + */ struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) { const struct iommu_ops *ops = NULL; @@ -2040,6 +2041,22 @@ struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) } EXPORT_SYMBOL_GPL(iommu_domain_alloc); +/** + * iommu_paging_domain_alloc() - Allocate a paging domain + * @dev: device for which the domain is allocated + * + * Allocate a paging domain which will be managed by a kernel driver. Return + * allocated domain if successful, or a ERR pointer for failure. + */ +struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) +{ + if (!dev_has_iommu(dev)) + return ERR_PTR(-ENODEV); + + return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED); +} +EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc); + void iommu_domain_free(struct iommu_domain *domain) { if (domain->type == IOMMU_DOMAIN_SVA) @@ -2832,13 +2849,16 @@ const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode return ops; } -int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, - const struct iommu_ops *ops) +int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode) { + const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + if (!ops) + return -EPROBE_DEFER; + if (fwspec) - return ops == fwspec->ops ? 0 : -EINVAL; + return ops == iommu_fwspec_ops(fwspec) ? 
0 : -EINVAL; if (!dev_iommu_get(dev)) return -ENOMEM; @@ -2848,9 +2868,8 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, if (!fwspec) return -ENOMEM; - of_node_get(to_of_node(iommu_fwnode)); + fwnode_handle_get(iommu_fwnode); fwspec->iommu_fwnode = iommu_fwnode; - fwspec->ops = ops; dev_iommu_fwspec_set(dev, fwspec); return 0; } @@ -3079,18 +3098,9 @@ static ssize_t iommu_group_store_type(struct iommu_group *group, if (ret) goto out_unlock; - /* - * Release the mutex here because ops->probe_finalize() call-back of - * some vendor IOMMU drivers calls arm_iommu_attach_device() which - * in-turn might call back into IOMMU core code, where it tries to take - * group->mutex, resulting in a deadlock. - */ - mutex_unlock(&group->mutex); - /* Make sure dma_ops is appropriatley set */ for_each_group_device(group, gdev) - iommu_group_do_probe_finalize(gdev->dev); - return count; + iommu_setup_dma_ops(gdev->dev); out_unlock: mutex_unlock(&group->mutex); @@ -3376,16 +3386,17 @@ static void __iommu_remove_group_pasid(struct iommu_group *group, * @domain: the iommu domain. * @dev: the attached device. * @pasid: the pasid of the device. + * @handle: the attach handle. * * Return: 0 on success, or an error. */ int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle) { /* Caller must be a probed driver on dev */ struct iommu_group *group = dev->iommu_group; struct group_device *device; - void *curr; int ret; if (!domain->ops->set_dev_pasid) @@ -3406,11 +3417,12 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, } } - curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL); - if (curr) { - ret = xa_err(curr) ? : -EBUSY; + if (handle) + handle->domain = domain; + + ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL); + if (ret) goto out_unlock; - } ret = __iommu_set_group_pasid(domain, group, pasid); if (ret) @@ -3438,46 +3450,11 @@ void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, mutex_lock(&group->mutex); __iommu_remove_group_pasid(group, pasid, domain); - WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); + xa_erase(&group->pasid_array, pasid); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); -/* - * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev - * @dev: the queried device - * @pasid: the pasid of the device - * @type: matched domain type, 0 for any match - * - * This is a variant of iommu_get_domain_for_dev(). It returns the existing - * domain attached to pasid of a device. Callers must hold a lock around this - * function, and both iommu_attach/detach_dev_pasid() whenever a domain of - * type is being manipulated. This API does not internally resolve races with - * attach/detach. - * - * Return: attached domain on success, NULL otherwise. 
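With iommu_get_domain_for_dev_pasid() removed, a caller that needs to find its per-attachment state later embeds a struct iommu_attach_handle, passes it to iommu_attach_device_pasid(), and looks it up again with iommu_attach_handle_get(). A minimal sketch; struct my_bond is hypothetical, the lookup helper is currently exported only to the IOMMUFD_INTERNAL namespace, and the caller must synchronize the lookup against attach/detach as the kernel-doc further below requires:

        struct my_bond {
                struct iommu_attach_handle handle;      /* must stay alive until detach */
                void *private;
        };

        static int my_attach(struct iommu_domain *domain, struct device *dev,
                             ioasid_t pasid, struct my_bond *bond)
        {
                /* the core stores &bond->handle in the group's pasid_array */
                return iommu_attach_device_pasid(domain, dev, pasid, &bond->handle);
        }

        static struct my_bond *my_find_bond(struct iommu_group *group, ioasid_t pasid)
        {
                struct iommu_attach_handle *h;

                /* type == 0 matches any domain type; h->domain was set at attach time */
                h = iommu_attach_handle_get(group, pasid, 0);
                if (IS_ERR(h))
                        return NULL;
                return container_of(h, struct my_bond, handle);
        }
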
- */ -struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, - ioasid_t pasid, - unsigned int type) -{ - /* Caller must be a probed driver on dev */ - struct iommu_group *group = dev->iommu_group; - struct iommu_domain *domain; - - if (!group) - return NULL; - - xa_lock(&group->pasid_array); - domain = xa_load(&group->pasid_array, pasid); - if (type && domain && domain->type != type) - domain = ERR_PTR(-EBUSY); - xa_unlock(&group->pasid_array); - - return domain; -} -EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); - ioasid_t iommu_alloc_global_pasid(struct device *dev) { int ret; @@ -3504,3 +3481,137 @@ void iommu_free_global_pasid(ioasid_t pasid) ida_free(&iommu_global_pasid_ida, pasid); } EXPORT_SYMBOL_GPL(iommu_free_global_pasid); + +/** + * iommu_attach_handle_get - Return the attach handle + * @group: the iommu group that domain was attached to + * @pasid: the pasid within the group + * @type: matched domain type, 0 for any match + * + * Return handle or ERR_PTR(-ENOENT) on none, ERR_PTR(-EBUSY) on mismatch. + * + * Return the attach handle to the caller. The life cycle of an iommu attach + * handle is from the time when the domain is attached to the time when the + * domain is detached. Callers are required to synchronize the call of + * iommu_attach_handle_get() with domain attachment and detachment. The attach + * handle can only be used during its life cycle. + */ +struct iommu_attach_handle * +iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type) +{ + struct iommu_attach_handle *handle; + + xa_lock(&group->pasid_array); + handle = xa_load(&group->pasid_array, pasid); + if (!handle) + handle = ERR_PTR(-ENOENT); + else if (type && handle->domain->type != type) + handle = ERR_PTR(-EBUSY); + xa_unlock(&group->pasid_array); + + return handle; +} +EXPORT_SYMBOL_NS_GPL(iommu_attach_handle_get, IOMMUFD_INTERNAL); + +/** + * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group + * @domain: IOMMU domain to attach + * @group: IOMMU group that will be attached + * @handle: attach handle + * + * Returns 0 on success and error code on failure. + * + * This is a variant of iommu_attach_group(). It allows the caller to provide + * an attach handle and use it when the domain is attached. This is currently + * used by IOMMUFD to deliver the I/O page faults. + */ +int iommu_attach_group_handle(struct iommu_domain *domain, + struct iommu_group *group, + struct iommu_attach_handle *handle) +{ + int ret; + + if (handle) + handle->domain = domain; + + mutex_lock(&group->mutex); + ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); + if (ret) + goto err_unlock; + + ret = __iommu_attach_group(domain, group); + if (ret) + goto err_erase; + mutex_unlock(&group->mutex); + + return 0; +err_erase: + xa_erase(&group->pasid_array, IOMMU_NO_PASID); +err_unlock: + mutex_unlock(&group->mutex); + return ret; +} +EXPORT_SYMBOL_NS_GPL(iommu_attach_group_handle, IOMMUFD_INTERNAL); + +/** + * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group + * @domain: IOMMU domain to attach + * @group: IOMMU group that will be attached + * + * Detach the specified IOMMU domain from the specified IOMMU group. + * It must be used in conjunction with iommu_attach_group_handle(). 
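Taken together, iommu_attach_group_handle(), iommu_replace_group_handle() (added just below) and iommu_detach_group_handle() give a domain manager a complete RID-attachment lifecycle keyed off the IOMMU_NO_PASID slot. A compressed sketch, assuming a hypothetical struct my_rid_attach and in-kernel access to the IOMMUFD_INTERNAL export namespace:

        struct my_rid_attach {
                struct iommu_attach_handle handle;
                /* per-attachment bookkeeping would live here */
        };

        static int my_switch_domains(struct iommu_group *group,
                                     struct iommu_domain *domain_a,
                                     struct iommu_domain *domain_b,
                                     struct my_rid_attach *att_a,
                                     struct my_rid_attach *att_b)
        {
                int ret;

                /* attach: &att_a->handle is stored at the IOMMU_NO_PASID index */
                ret = iommu_attach_group_handle(domain_a, group, &att_a->handle);
                if (ret)
                        return ret;

                /* replace: move to domain_b in one step, installing att_b's handle */
                ret = iommu_replace_group_handle(group, domain_b, &att_b->handle);
                if (ret) {
                        iommu_detach_group_handle(domain_a, group);
                        return ret;
                }

                /* detach: restore the group's core domain and erase the handle entry */
                iommu_detach_group_handle(domain_b, group);
                return 0;
        }
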
+ */ +void iommu_detach_group_handle(struct iommu_domain *domain, + struct iommu_group *group) +{ + mutex_lock(&group->mutex); + __iommu_group_set_core_domain(group); + xa_erase(&group->pasid_array, IOMMU_NO_PASID); + mutex_unlock(&group->mutex); +} +EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, IOMMUFD_INTERNAL); + +/** + * iommu_replace_group_handle - replace the domain that a group is attached to + * @group: IOMMU group that will be attached to the new domain + * @new_domain: new IOMMU domain to replace with + * @handle: attach handle + * + * This is a variant of iommu_group_replace_domain(). It allows the caller to + * provide an attach handle for the new domain and use it when the domain is + * attached. + */ +int iommu_replace_group_handle(struct iommu_group *group, + struct iommu_domain *new_domain, + struct iommu_attach_handle *handle) +{ + void *curr; + int ret; + + if (!new_domain) + return -EINVAL; + + mutex_lock(&group->mutex); + if (handle) { + ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL); + if (ret) + goto err_unlock; + } + + ret = __iommu_group_set_domain(group, new_domain); + if (ret) + goto err_release; + + curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL); + WARN_ON(xa_is_err(curr)); + + mutex_unlock(&group->mutex); + + return 0; +err_release: + xa_release(&group->pasid_array, IOMMU_NO_PASID); +err_unlock: + mutex_unlock(&group->mutex); + return ret; +} +EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, IOMMUFD_INTERNAL); diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile index 34b446146961c29e7b24dc5cc890a5aa557a6ce8..cf4605962bea6a37e0e286df37d44c42f160d3df 100644 --- a/drivers/iommu/iommufd/Makefile +++ b/drivers/iommu/iommufd/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only iommufd-y := \ device.o \ + fault.o \ hw_pagetable.o \ io_pagetable.o \ ioas.o \ diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index 68ac138d925ca4f3f40ac94b34bfb4245f4d6d5a..59ae9a4ad0170f16d3cd3cc7812a68f89ad1bb51 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -215,6 +215,7 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, refcount_inc(&idev->obj.users); /* igroup refcount moves into iommufd_device */ idev->igroup = igroup; + mutex_init(&idev->iopf_lock); /* * If the caller fails after this success it must call @@ -376,7 +377,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, * attachment. 
*/ if (list_empty(&idev->igroup->device_list)) { - rc = iommu_attach_group(hwpt->domain, idev->igroup->group); + rc = iommufd_hwpt_attach_device(hwpt, idev); if (rc) goto err_unresv; idev->igroup->hwpt = hwpt; @@ -402,7 +403,7 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev) mutex_lock(&idev->igroup->lock); list_del(&idev->group_item); if (list_empty(&idev->igroup->device_list)) { - iommu_detach_group(hwpt->domain, idev->igroup->group); + iommufd_hwpt_detach_device(hwpt, idev); idev->igroup->hwpt = NULL; } if (hwpt_is_paging(hwpt)) @@ -513,7 +514,7 @@ iommufd_device_do_replace(struct iommufd_device *idev, goto err_unlock; } - rc = iommu_group_replace_domain(igroup->group, hwpt->domain); + rc = iommufd_hwpt_replace_device(idev, hwpt, old_hwpt); if (rc) goto err_unresv; diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c new file mode 100644 index 0000000000000000000000000000000000000000..54d6cd20a6730d4ea82d9aa89b290ffc2f7b003e --- /dev/null +++ b/drivers/iommu/iommufd/fault.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation + */ +#define pr_fmt(fmt) "iommufd: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../iommu-priv.h" +#include "iommufd_private.h" + +static int iommufd_fault_iopf_enable(struct iommufd_device *idev) +{ + struct device *dev = idev->dev; + int ret; + + /* + * Once we turn on PCI/PRI support for VF, the response failure code + * should not be forwarded to the hardware due to PRI being a shared + * resource between PF and VFs. There is no coordination for this + * shared capability. This waits for a vPRI reset to recover. + */ + if (dev_is_pci(dev) && to_pci_dev(dev)->is_virtfn) + return -EINVAL; + + mutex_lock(&idev->iopf_lock); + /* Device iopf has already been on. 
*/ + if (++idev->iopf_enabled > 1) { + mutex_unlock(&idev->iopf_lock); + return 0; + } + + ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF); + if (ret) + --idev->iopf_enabled; + mutex_unlock(&idev->iopf_lock); + + return ret; +} + +static void iommufd_fault_iopf_disable(struct iommufd_device *idev) +{ + mutex_lock(&idev->iopf_lock); + if (!WARN_ON(idev->iopf_enabled == 0)) { + if (--idev->iopf_enabled == 0) + iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF); + } + mutex_unlock(&idev->iopf_lock); +} + +static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct iommufd_attach_handle *handle; + int ret; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->idev = idev; + ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group, + &handle->handle); + if (ret) + kfree(handle); + + return ret; +} + +int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + int ret; + + if (!hwpt->fault) + return -EINVAL; + + ret = iommufd_fault_iopf_enable(idev); + if (ret) + return ret; + + ret = __fault_domain_attach_dev(hwpt, idev); + if (ret) + iommufd_fault_iopf_disable(idev); + + return ret; +} + +static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, + struct iommufd_attach_handle *handle) +{ + struct iommufd_fault *fault = hwpt->fault; + struct iopf_group *group, *next; + unsigned long index; + + if (!fault) + return; + + mutex_lock(&fault->mutex); + list_for_each_entry_safe(group, next, &fault->deliver, node) { + if (group->attach_handle != &handle->handle) + continue; + list_del(&group->node); + iopf_group_response(group, IOMMU_PAGE_RESP_INVALID); + iopf_free_group(group); + } + + xa_for_each(&fault->response, index, group) { + if (group->attach_handle != &handle->handle) + continue; + xa_erase(&fault->response, index); + iopf_group_response(group, IOMMU_PAGE_RESP_INVALID); + iopf_free_group(group); + } + mutex_unlock(&fault->mutex); +} + +static struct iommufd_attach_handle * +iommufd_device_get_attach_handle(struct iommufd_device *idev) +{ + struct iommu_attach_handle *handle; + + handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0); + if (!handle) + return NULL; + + return to_iommufd_handle(handle); +} + +void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct iommufd_attach_handle *handle; + + handle = iommufd_device_get_attach_handle(idev); + iommu_detach_group_handle(hwpt->domain, idev->igroup->group); + iommufd_auto_response_faults(hwpt, handle); + iommufd_fault_iopf_disable(idev); + kfree(handle); +} + +static int __fault_domain_replace_dev(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old) +{ + struct iommufd_attach_handle *handle, *curr = NULL; + int ret; + + if (old->fault) + curr = iommufd_device_get_attach_handle(idev); + + if (hwpt->fault) { + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->handle.domain = hwpt->domain; + handle->idev = idev; + ret = iommu_replace_group_handle(idev->igroup->group, + hwpt->domain, &handle->handle); + } else { + ret = iommu_replace_group_handle(idev->igroup->group, + hwpt->domain, NULL); + } + + if (!ret && curr) { + iommufd_auto_response_faults(old, curr); + kfree(curr); + } + + return ret; +} + +int iommufd_fault_domain_replace_dev(struct iommufd_device *idev, + struct iommufd_hw_pagetable 
*hwpt, + struct iommufd_hw_pagetable *old) +{ + bool iopf_off = !hwpt->fault && old->fault; + bool iopf_on = hwpt->fault && !old->fault; + int ret; + + if (iopf_on) { + ret = iommufd_fault_iopf_enable(idev); + if (ret) + return ret; + } + + ret = __fault_domain_replace_dev(idev, hwpt, old); + if (ret) { + if (iopf_on) + iommufd_fault_iopf_disable(idev); + return ret; + } + + if (iopf_off) + iommufd_fault_iopf_disable(idev); + + return 0; +} + +void iommufd_fault_destroy(struct iommufd_object *obj) +{ + struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj); + struct iopf_group *group, *next; + + /* + * The iommufd object's reference count is zero at this point. + * We can be confident that no other threads are currently + * accessing this pointer. Therefore, acquiring the mutex here + * is unnecessary. + */ + list_for_each_entry_safe(group, next, &fault->deliver, node) { + list_del(&group->node); + iopf_group_response(group, IOMMU_PAGE_RESP_INVALID); + iopf_free_group(group); + } +} + +static void iommufd_compose_fault_message(struct iommu_fault *fault, + struct iommu_hwpt_pgfault *hwpt_fault, + struct iommufd_device *idev, + u32 cookie) +{ + hwpt_fault->flags = fault->prm.flags; + hwpt_fault->dev_id = idev->obj.id; + hwpt_fault->pasid = fault->prm.pasid; + hwpt_fault->grpid = fault->prm.grpid; + hwpt_fault->perm = fault->prm.perm; + hwpt_fault->addr = fault->prm.addr; + hwpt_fault->length = 0; + hwpt_fault->cookie = cookie; +} + +static ssize_t iommufd_fault_fops_read(struct file *filep, char __user *buf, + size_t count, loff_t *ppos) +{ + size_t fault_size = sizeof(struct iommu_hwpt_pgfault); + struct iommufd_fault *fault = filep->private_data; + struct iommu_hwpt_pgfault data; + struct iommufd_device *idev; + struct iopf_group *group; + struct iopf_fault *iopf; + size_t done = 0; + int rc = 0; + + if (*ppos || count % fault_size) + return -ESPIPE; + + mutex_lock(&fault->mutex); + while (!list_empty(&fault->deliver) && count > done) { + group = list_first_entry(&fault->deliver, + struct iopf_group, node); + + if (group->fault_count * fault_size > count - done) + break; + + rc = xa_alloc(&fault->response, &group->cookie, group, + xa_limit_32b, GFP_KERNEL); + if (rc) + break; + + idev = to_iommufd_handle(group->attach_handle)->idev; + list_for_each_entry(iopf, &group->faults, list) { + iommufd_compose_fault_message(&iopf->fault, + &data, idev, + group->cookie); + if (copy_to_user(buf + done, &data, fault_size)) { + xa_erase(&fault->response, group->cookie); + rc = -EFAULT; + break; + } + done += fault_size; + } + + list_del(&group->node); + } + mutex_unlock(&fault->mutex); + + return done == 0 ? rc : done; +} + +static ssize_t iommufd_fault_fops_write(struct file *filep, const char __user *buf, + size_t count, loff_t *ppos) +{ + size_t response_size = sizeof(struct iommu_hwpt_page_response); + struct iommufd_fault *fault = filep->private_data; + struct iommu_hwpt_page_response response; + struct iopf_group *group; + size_t done = 0; + int rc = 0; + + if (*ppos || count % response_size) + return -ESPIPE; + + mutex_lock(&fault->mutex); + while (count > done) { + rc = copy_from_user(&response, buf + done, response_size); + if (rc) + break; + + group = xa_erase(&fault->response, response.cookie); + if (!group) { + rc = -EINVAL; + break; + } + + iopf_group_response(group, response.code); + iopf_free_group(group); + done += response_size; + } + mutex_unlock(&fault->mutex); + + return done == 0 ? 
rc : done; +} + +static __poll_t iommufd_fault_fops_poll(struct file *filep, + struct poll_table_struct *wait) +{ + struct iommufd_fault *fault = filep->private_data; + __poll_t pollflags = EPOLLOUT; + + poll_wait(filep, &fault->wait_queue, wait); + mutex_lock(&fault->mutex); + if (!list_empty(&fault->deliver)) + pollflags |= EPOLLIN | EPOLLRDNORM; + mutex_unlock(&fault->mutex); + + return pollflags; +} + +static int iommufd_fault_fops_release(struct inode *inode, struct file *filep) +{ + struct iommufd_fault *fault = filep->private_data; + + refcount_dec(&fault->obj.users); + iommufd_ctx_put(fault->ictx); + return 0; +} + +static const struct file_operations iommufd_fault_fops = { + .owner = THIS_MODULE, + .open = nonseekable_open, + .read = iommufd_fault_fops_read, + .write = iommufd_fault_fops_write, + .poll = iommufd_fault_fops_poll, + .release = iommufd_fault_fops_release, + .llseek = no_llseek, +}; + +int iommufd_fault_alloc(struct iommufd_ucmd *ucmd) +{ + struct iommu_fault_alloc *cmd = ucmd->cmd; + struct iommufd_fault *fault; + struct file *filep; + int fdno; + int rc; + + if (cmd->flags) + return -EOPNOTSUPP; + + fault = iommufd_object_alloc(ucmd->ictx, fault, IOMMUFD_OBJ_FAULT); + if (IS_ERR(fault)) + return PTR_ERR(fault); + + fault->ictx = ucmd->ictx; + INIT_LIST_HEAD(&fault->deliver); + xa_init_flags(&fault->response, XA_FLAGS_ALLOC1); + mutex_init(&fault->mutex); + init_waitqueue_head(&fault->wait_queue); + + filep = anon_inode_getfile("[iommufd-pgfault]", &iommufd_fault_fops, + fault, O_RDWR); + if (IS_ERR(filep)) { + rc = PTR_ERR(filep); + goto out_abort; + } + + refcount_inc(&fault->obj.users); + iommufd_ctx_get(fault->ictx); + fault->filep = filep; + + fdno = get_unused_fd_flags(O_CLOEXEC); + if (fdno < 0) { + rc = fdno; + goto out_fput; + } + + cmd->out_fault_id = fault->obj.id; + cmd->out_fault_fd = fdno; + + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); + if (rc) + goto out_put_fdno; + iommufd_object_finalize(ucmd->ictx, &fault->obj); + + fd_install(fdno, fault->filep); + + return 0; +out_put_fdno: + put_unused_fd(fdno); +out_fput: + fput(filep); + refcount_dec(&fault->obj.users); + iommufd_ctx_put(fault->ictx); +out_abort: + iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj); + + return rc; +} + +int iommufd_fault_iopf_handler(struct iopf_group *group) +{ + struct iommufd_hw_pagetable *hwpt; + struct iommufd_fault *fault; + + hwpt = group->attach_handle->domain->fault_data; + fault = hwpt->fault; + + mutex_lock(&fault->mutex); + list_add_tail(&group->node, &fault->deliver); + mutex_unlock(&fault->mutex); + + wake_up_interruptible(&fault->wait_queue); + + return 0; +} diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 33d142f8057d70a77f44e842afdd84b1bee0a970..5ea1e6e79dff8ced1d83b5fe5d262505a3274e88 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -8,6 +8,15 @@ #include "../iommu-priv.h" #include "iommufd_private.h" +static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt) +{ + if (hwpt->domain) + iommu_domain_free(hwpt->domain); + + if (hwpt->fault) + refcount_dec(&hwpt->fault->obj.users); +} + void iommufd_hwpt_paging_destroy(struct iommufd_object *obj) { struct iommufd_hwpt_paging *hwpt_paging = @@ -22,9 +31,7 @@ void iommufd_hwpt_paging_destroy(struct iommufd_object *obj) hwpt_paging->common.domain); } - if (hwpt_paging->common.domain) - iommu_domain_free(hwpt_paging->common.domain); - + __iommufd_hwpt_destroy(&hwpt_paging->common); 
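From user space, the fault object created by IOMMU_FAULT_QUEUE_ALLOC is consumed through its file descriptor (out_fault_fd): poll for readability, read whole struct iommu_hwpt_pgfault records, service them, then write a struct iommu_hwpt_page_response echoing each group's cookie. The fault id itself is wired to a page table by passing it to IOMMU_HWPT_ALLOC with IOMMU_HWPT_FAULT_ID_VALID, as the hw_pagetable hunk below shows. A user-space sketch; handle_one_fault() and the numeric success code are assumptions to be checked against the iommufd uAPI header:

        #include <poll.h>
        #include <unistd.h>
        #include <linux/iommufd.h>      /* struct iommu_hwpt_pgfault, iommu_hwpt_page_response */

        static void drain_faults(int fault_fd)  /* fd returned in out_fault_fd */
        {
                struct iommu_hwpt_pgfault evs[16];
                struct iommu_hwpt_page_response resp = {};
                struct pollfd pfd = { .fd = fault_fd, .events = POLLIN };
                ssize_t n, nrec, i;

                poll(&pfd, 1, -1);
                /* reads must be a multiple of the record size; only whole groups are returned */
                n = read(fault_fd, evs, sizeof(evs));
                nrec = n > 0 ? n / (ssize_t)sizeof(evs[0]) : 0;
                for (i = 0; i < nrec; i++) {
                        handle_one_fault(&evs[i]);              /* application specific */
                        /* one response per fault group, keyed by its cookie */
                        if (i + 1 == nrec || evs[i + 1].cookie != evs[i].cookie) {
                                resp.cookie = evs[i].cookie;
                                resp.code = 0;                  /* assumed "success" response code */
                                write(fault_fd, &resp, sizeof(resp));
                        }
                }
        }
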
refcount_dec(&hwpt_paging->ioas->obj.users); } @@ -49,9 +56,7 @@ void iommufd_hwpt_nested_destroy(struct iommufd_object *obj) struct iommufd_hwpt_nested *hwpt_nested = container_of(obj, struct iommufd_hwpt_nested, common.obj); - if (hwpt_nested->common.domain) - iommu_domain_free(hwpt_nested->common.domain); - + __iommufd_hwpt_destroy(&hwpt_nested->common); refcount_dec(&hwpt_nested->parent->common.obj.users); } @@ -114,6 +119,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, return ERR_PTR(-EOPNOTSUPP); if (flags & ~valid_flags) return ERR_PTR(-EOPNOTSUPP); + if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) && + !device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) + return ERR_PTR(-EOPNOTSUPP); hwpt_paging = __iommufd_object_alloc( ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj); @@ -137,9 +145,10 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, } hwpt->domain->owner = ops; } else { - hwpt->domain = iommu_domain_alloc(idev->dev->bus); - if (!hwpt->domain) { - rc = -ENOMEM; + hwpt->domain = iommu_paging_domain_alloc(idev->dev); + if (IS_ERR(hwpt->domain)) { + rc = PTR_ERR(hwpt->domain); + hwpt->domain = NULL; goto out_abort; } } @@ -213,7 +222,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, struct iommufd_hw_pagetable *hwpt; int rc; - if (flags || !user_data->len || !ops->domain_alloc_user) + if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || + !user_data->len || !ops->domain_alloc_user) return ERR_PTR(-EOPNOTSUPP); if (parent->auto_domain || !parent->nest_parent) return ERR_PTR(-EINVAL); @@ -227,7 +237,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx, refcount_inc(&parent->common.obj.users); hwpt_nested->parent = parent; - hwpt->domain = ops->domain_alloc_user(idev->dev, flags, + hwpt->domain = ops->domain_alloc_user(idev->dev, + flags & ~IOMMU_HWPT_FAULT_ID_VALID, parent->common.domain, user_data); if (IS_ERR(hwpt->domain)) { rc = PTR_ERR(hwpt->domain); @@ -308,6 +319,21 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) goto out_put_pt; } + if (cmd->flags & IOMMU_HWPT_FAULT_ID_VALID) { + struct iommufd_fault *fault; + + fault = iommufd_get_fault(ucmd, cmd->fault_id); + if (IS_ERR(fault)) { + rc = PTR_ERR(fault); + goto out_hwpt; + } + hwpt->fault = fault; + hwpt->domain->iopf_handler = iommufd_fault_iopf_handler; + hwpt->domain->fault_data = hwpt; + refcount_inc(&fault->obj.users); + iommufd_put_object(ucmd->ictx, &fault->obj); + } + cmd->out_hwpt_id = hwpt->obj.id; rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); if (rc) diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 991f864d1f9bc175b9acc9b9c445d32ed837f878..92efe30a8f0d00fa92197261b26c69c09fd4fd9a 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -11,6 +11,7 @@ #include #include #include +#include "../iommu-priv.h" struct iommu_domain; struct iommu_group; @@ -128,6 +129,7 @@ enum iommufd_object_type { IOMMUFD_OBJ_HWPT_NESTED, IOMMUFD_OBJ_IOAS, IOMMUFD_OBJ_ACCESS, + IOMMUFD_OBJ_FAULT, #ifdef CONFIG_IOMMUFD_TEST IOMMUFD_OBJ_SELFTEST, #endif @@ -292,6 +294,7 @@ int iommufd_check_iova_range(struct io_pagetable *iopt, struct iommufd_hw_pagetable { struct iommufd_object obj; struct iommu_domain *domain; + struct iommufd_fault *fault; }; struct iommufd_hwpt_paging { @@ -395,6 +398,9 @@ struct iommufd_device { /* always the physical device */ struct device *dev; bool enforce_cache_coherency; + /* protect iopf_enabled counter */ + struct mutex 
iopf_lock; + unsigned int iopf_enabled; }; static inline struct iommufd_device * @@ -426,6 +432,80 @@ void iopt_remove_access(struct io_pagetable *iopt, u32 iopt_access_list_id); void iommufd_access_destroy_object(struct iommufd_object *obj); +/* + * An iommufd_fault object represents an interface to deliver I/O page faults + * to the user space. These objects are created/destroyed by the user space and + * associated with hardware page table objects during page-table allocation. + */ +struct iommufd_fault { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct file *filep; + + /* The lists of outstanding faults protected by below mutex. */ + struct mutex mutex; + struct list_head deliver; + struct xarray response; + + struct wait_queue_head wait_queue; +}; + +struct iommufd_attach_handle { + struct iommu_attach_handle handle; + struct iommufd_device *idev; +}; + +/* Convert an iommu attach handle to iommufd handle. */ +#define to_iommufd_handle(hdl) container_of(hdl, struct iommufd_attach_handle, handle) + +static inline struct iommufd_fault * +iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_FAULT), + struct iommufd_fault, obj); +} + +int iommufd_fault_alloc(struct iommufd_ucmd *ucmd); +void iommufd_fault_destroy(struct iommufd_object *obj); +int iommufd_fault_iopf_handler(struct iopf_group *group); + +int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev); +void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev); +int iommufd_fault_domain_replace_dev(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old); + +static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + if (hwpt->fault) + return iommufd_fault_domain_attach_dev(hwpt, idev); + + return iommu_attach_group(hwpt->domain, idev->igroup->group); +} + +static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + if (hwpt->fault) + iommufd_fault_domain_detach_dev(hwpt, idev); + + iommu_detach_group(hwpt->domain, idev->igroup->group); +} + +static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev, + struct iommufd_hw_pagetable *hwpt, + struct iommufd_hw_pagetable *old) +{ + if (old->fault || hwpt->fault) + return iommufd_fault_domain_replace_dev(idev, hwpt, old); + + return iommu_group_replace_domain(idev->igroup->group, hwpt->domain); +} + #ifdef CONFIG_IOMMUFD_TEST int iommufd_test(struct iommufd_ucmd *ucmd); void iommufd_selftest_destroy(struct iommufd_object *obj); diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index e854d3f672051b5223e0fec8af741abf03bbffbd..acbbba1c66716e9fb67273436d8f39967b127bfa 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -22,6 +22,7 @@ enum { IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, IOMMU_TEST_OP_DIRTY, IOMMU_TEST_OP_MD_CHECK_IOTLB, + IOMMU_TEST_OP_TRIGGER_IOPF, }; enum { @@ -127,6 +128,13 @@ struct iommu_test_cmd { __u32 id; __u32 iotlb; } check_iotlb; + struct { + __u32 dev_id; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + } trigger_iopf; }; __u32 last; }; diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c index 89e6e11157f0ca67b3fa07241163d754523fcd24..6b09b7856789010c77257d6697f77e31809106fc 100644 --- 
a/drivers/iommu/iommufd/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -35,6 +35,9 @@ struct iova_bitmap_map { /* base IOVA representing bit 0 of the first page */ unsigned long iova; + /* mapped length */ + unsigned long length; + /* page size order that each bit granules to */ unsigned long pgshift; @@ -113,9 +116,6 @@ struct iova_bitmap { /* length of the IOVA range for the whole bitmap */ size_t length; - - /* length of the IOVA range set ahead the pinned pages */ - unsigned long set_ahead_length; }; /* @@ -156,6 +156,8 @@ static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap) return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip); } +static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap); + /* * Pins the bitmap user pages for the current range window. * This is internal to IOVA bitmap and called when advancing the @@ -206,6 +208,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap) * aligned. */ mapped->pgoff = offset_in_page(addr); + mapped->length = iova_bitmap_mapped_length(bitmap); return 0; } @@ -263,9 +266,6 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, goto err; } - rc = iova_bitmap_get(bitmap); - if (rc) - goto err; return bitmap; err: @@ -338,65 +338,34 @@ static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap) } /* - * Returns true if there's not more data to iterate. + * Returns true if [@iova..@iova+@length-1] is part of the mapped IOVA range. */ -static bool iova_bitmap_done(struct iova_bitmap *bitmap) +static bool iova_bitmap_mapped_range(struct iova_bitmap_map *mapped, + unsigned long iova, size_t length) { - return bitmap->mapped_base_index >= bitmap->mapped_total_index; -} - -static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap, - size_t set_ahead_length) -{ - int ret = 0; - - while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) { - unsigned long length = iova_bitmap_mapped_length(bitmap); - unsigned long iova = iova_bitmap_mapped_iova(bitmap); - - ret = iova_bitmap_get(bitmap); - if (ret) - break; - - length = min(length, set_ahead_length); - iova_bitmap_set(bitmap, iova, length); - - set_ahead_length -= length; - bitmap->mapped_base_index += - iova_bitmap_offset_to_index(bitmap, length - 1) + 1; - iova_bitmap_put(bitmap); - } - - bitmap->set_ahead_length = 0; - return ret; + return mapped->npages && + (iova >= mapped->iova && + (iova + length - 1) <= (mapped->iova + mapped->length - 1)); } /* - * Advances to the next range, releases the current pinned + * Advances to a selected range, releases the current pinned * pages and pins the next set of bitmap pages. * Returns 0 on success or otherwise errno. 
*/ -static int iova_bitmap_advance(struct iova_bitmap *bitmap) +static int iova_bitmap_advance_to(struct iova_bitmap *bitmap, + unsigned long iova) { - unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1; - unsigned long count = iova_bitmap_offset_to_index(bitmap, iova) + 1; + unsigned long index; - bitmap->mapped_base_index += count; + index = iova_bitmap_offset_to_index(bitmap, iova - bitmap->iova); + if (index >= bitmap->mapped_total_index) + return -EINVAL; + bitmap->mapped_base_index = index; iova_bitmap_put(bitmap); - if (iova_bitmap_done(bitmap)) - return 0; - - /* Iterate, set and skip any bits requested for next iteration */ - if (bitmap->set_ahead_length) { - int ret; - - ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length); - if (ret) - return ret; - } - /* When advancing the index we pin the next set of bitmap pages */ + /* Pin the next set of bitmap pages */ return iova_bitmap_get(bitmap); } @@ -416,17 +385,7 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap) int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn) { - int ret = 0; - - for (; !iova_bitmap_done(bitmap) && !ret; - ret = iova_bitmap_advance(bitmap)) { - ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap), - iova_bitmap_mapped_length(bitmap), opaque); - if (ret) - break; - } - - return ret; + return fn(bitmap, bitmap->iova, bitmap->length, opaque); } EXPORT_SYMBOL_NS_GPL(iova_bitmap_for_each, IOMMUFD); @@ -444,11 +403,25 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length) { struct iova_bitmap_map *mapped = &bitmap->mapped; - unsigned long cur_bit = ((iova - mapped->iova) >> - mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; - unsigned long last_bit = (((iova + length - 1) - mapped->iova) >> - mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; - unsigned long last_page_idx = mapped->npages - 1; + unsigned long cur_bit, last_bit, last_page_idx; + +update_indexes: + if (unlikely(!iova_bitmap_mapped_range(mapped, iova, length))) { + + /* + * The attempt to advance the base index to @iova + * may fail if it's out of bounds, or pinning the pages + * returns an error. 
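With the set-ahead machinery gone, iova_bitmap_set() now re-pins the user bitmap window on demand around whatever range it is asked to mark, so a reporting callback can record dirty ranges wherever they fall. A minimal sketch of such a callback; struct my_tracker and collect_dirty_ranges() are hypothetical:

        static int my_report_dirty(struct iova_bitmap *bitmap, unsigned long iova,
                                   size_t length, void *opaque)
        {
                struct my_tracker *t = opaque;
                unsigned long dirty_iova;
                size_t dirty_len;

                /* ranges may land anywhere inside [iova, iova + length) */
                while (collect_dirty_ranges(t, iova, length, &dirty_iova, &dirty_len))
                        iova_bitmap_set(bitmap, dirty_iova, dirty_len);

                return 0;
        }

        /* iova_bitmap_for_each() now invokes the callback once for the whole range */
        ret = iova_bitmap_for_each(bitmap, tracker, my_report_dirty);
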
+ */ + if (iova_bitmap_advance_to(bitmap, iova)) + return; + } + + last_page_idx = mapped->npages - 1; + cur_bit = ((iova - mapped->iova) >> + mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; + last_bit = (((iova + length - 1) - mapped->iova) >> + mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; do { unsigned int page_idx = cur_bit / BITS_PER_PAGE; @@ -457,18 +430,19 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, last_bit - cur_bit + 1); void *kaddr; - if (unlikely(page_idx > last_page_idx)) - break; + if (unlikely(page_idx > last_page_idx)) { + unsigned long left = + ((last_bit - cur_bit + 1) << mapped->pgshift); + + iova += (length - left); + length = left; + goto update_indexes; + } kaddr = kmap_local_page(mapped->pages[page_idx]); bitmap_set(kaddr, offset, nbits); kunmap_local(kaddr); cur_bit += nbits; } while (cur_bit <= last_bit); - - if (unlikely(cur_bit <= last_bit)) { - bitmap->set_ahead_length = - ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift); - } } EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD); diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index 39b32932c61ee4e924e24ab18fc05ec4149829c9..83bbd7c5d160847044101234425e29e031407f98 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -319,6 +319,7 @@ static int iommufd_option(struct iommufd_ucmd *ucmd) union ucmd_buffer { struct iommu_destroy destroy; + struct iommu_fault_alloc fault; struct iommu_hw_info info; struct iommu_hwpt_alloc hwpt; struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap; @@ -355,6 +356,8 @@ struct iommufd_ioctl_op { } static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id), + IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_fault_alloc, struct iommu_fault_alloc, + out_fault_fd), IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info, __reserved), IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc, @@ -513,6 +516,9 @@ static const struct iommufd_object_ops iommufd_object_ops[] = { .destroy = iommufd_hwpt_nested_destroy, .abort = iommufd_hwpt_nested_abort, }, + [IOMMUFD_OBJ_FAULT] = { + .destroy = iommufd_fault_destroy, + }, #ifdef CONFIG_IOMMUFD_TEST [IOMMUFD_OBJ_SELFTEST] = { .destroy = iommufd_selftest_destroy, diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 528f356238b343a72fefa609334062c9c91219f1..117f644a0c5b75c6701001b08b4986627c3bc8f2 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -809,13 +809,14 @@ static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + cur_pages = atomic_long_read(&pages->source_user->locked_vm); do { - cur_pages = atomic_long_read(&pages->source_user->locked_vm); new_pages = cur_pages + npages; if (new_pages > lock_limit) return -ENOMEM; - } while (atomic_long_cmpxchg(&pages->source_user->locked_vm, cur_pages, - new_pages) != cur_pages); + } while (!atomic_long_try_cmpxchg(&pages->source_user->locked_vm, + &cur_pages, new_pages)); return 0; } diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 7a2199470f3121da91e060bca82315a6944e37b8..f95e32e2913330d201c0ab6eb792625a538b1797 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -266,8 +266,8 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, /* Clear dirty */ if (mock_test_and_clear_dirty(mock, head, pgsize, 
flags)) - iommu_dirty_bitmap_record(dirty, head, pgsize); - iova = head + pgsize; + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; } while (iova < end); return 0; @@ -504,6 +504,8 @@ static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) return false; } +static struct iopf_queue *mock_iommu_iopf_queue; + static struct iommu_device mock_iommu_device = { }; @@ -514,6 +516,29 @@ static struct iommu_device *mock_probe_device(struct device *dev) return &mock_iommu_device; } +static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt, + struct iommu_page_response *msg) +{ +} + +static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) +{ + if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue) + return -ENODEV; + + return iopf_queue_add_device(mock_iommu_iopf_queue, dev); +} + +static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) +{ + if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue) + return -ENODEV; + + iopf_queue_remove_device(mock_iommu_iopf_queue, dev); + + return 0; +} + static const struct iommu_ops mock_ops = { /* * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type() @@ -529,6 +554,10 @@ static const struct iommu_ops mock_ops = { .capable = mock_domain_capable, .device_group = generic_device_group, .probe_device = mock_probe_device, + .page_response = mock_domain_page_response, + .dev_enable_feat = mock_dev_enable_feat, + .dev_disable_feat = mock_dev_disable_feat, + .user_pasid_table = true, .default_domain_ops = &(struct iommu_domain_ops){ .free = mock_domain_free, @@ -1334,7 +1363,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, } max = length / page_size; - bitmap_size = max / BITS_PER_BYTE; + bitmap_size = DIV_ROUND_UP(max, BITS_PER_BYTE); tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); if (!tmp) { @@ -1375,6 +1404,31 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, return rc; } +static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd, + struct iommu_test_cmd *cmd) +{ + struct iopf_fault event = { }; + struct iommufd_device *idev; + + idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id); + if (IS_ERR(idev)) + return PTR_ERR(idev); + + event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID) + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + event.fault.type = IOMMU_FAULT_PAGE_REQ; + event.fault.prm.addr = cmd->trigger_iopf.addr; + event.fault.prm.pasid = cmd->trigger_iopf.pasid; + event.fault.prm.grpid = cmd->trigger_iopf.grpid; + event.fault.prm.perm = cmd->trigger_iopf.perm; + + iommu_report_device_fault(idev->dev, &event); + iommufd_put_object(ucmd->ictx, &idev->obj); + + return 0; +} + void iommufd_selftest_destroy(struct iommufd_object *obj) { struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); @@ -1450,6 +1504,8 @@ int iommufd_test(struct iommufd_ucmd *ucmd) cmd->dirty.page_size, u64_to_user_ptr(cmd->dirty.uptr), cmd->dirty.flags); + case IOMMU_TEST_OP_TRIGGER_IOPF: + return iommufd_test_trigger_iopf(ucmd, cmd); default: return -EOPNOTSUPP; } @@ -1491,6 +1547,9 @@ int __init iommufd_test_init(void) &iommufd_mock_bus_type.nb); if (rc) goto err_sysfs; + + mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq"); + return 0; err_sysfs: @@ -1506,6 +1565,11 @@ int __init iommufd_test_init(void) void iommufd_test_exit(void) { + if (mock_iommu_iopf_queue) { + 
iopf_queue_free(mock_iommu_iopf_queue); + mock_iommu_iopf_queue = NULL; + } + iommu_device_sysfs_remove(&mock_iommu_device); iommu_device_unregister_bus(&mock_iommu_device, &iommufd_mock_bus_type.bus, diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index d59d0ea2fd21911a3285f73a0a20d1ab4fe39464..16c6adff3eb7bf7465fe5c347f8ff21c34ea3090 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -1000,4 +1000,5 @@ void iova_cache_put(void) EXPORT_SYMBOL_GPL(iova_cache_put); MODULE_AUTHOR("Anil S Keshavamurthy "); +MODULE_DESCRIPTION("IOMMU I/O Virtual Address management"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 2f7281ccc05f64778a7f70309dd054192bb1ae74..c2443659812a8036b632529a161450c6100ba79d 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -24,6 +24,8 @@ int no_x2apic_optout; int disable_irq_post = 0; +bool enable_posted_msi __ro_after_init; + static int disable_irq_remap; static struct irq_remap_ops *remap_ops; @@ -70,7 +72,8 @@ static __init int setup_irqremap(char *str) no_x2apic_optout = 1; else if (!strncmp(str, "nopost", 6)) disable_irq_post = 1; - + else if (IS_ENABLED(CONFIG_X86_POSTED_MSI) && !strncmp(str, "posted_msi", 10)) + enable_posted_msi = true; str += strcspn(str, ","); while (*str == ',') str++; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index d6e4002200bd33d6219ed09f1c90ccac0e3404e4..c6ea5b4baff3be9d3b518175ba5816e69f94e132 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -401,7 +401,6 @@ static const struct iommu_ops mtk_iommu_v1_ops; static int mtk_iommu_v1_create_mapping(struct device *dev, const struct of_phandle_args *args) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct mtk_iommu_v1_data *data; struct platform_device *m4updev; struct dma_iommu_mapping *mtk_mapping; @@ -413,14 +412,9 @@ static int mtk_iommu_v1_create_mapping(struct device *dev, return -EINVAL; } - if (!fwspec) { - ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops); - if (ret) - return ret; - fwspec = dev_iommu_fwspec_get(dev); - } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) { - return -EINVAL; - } + ret = iommu_fwspec_init(dev, of_fwnode_handle(args->np)); + if (ret) + return ret; if (!dev_iommu_priv_get(dev)) { /* Get the m4u device */ diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 3afe0b48a48db937baa711dc4264d865a9c5f0d7..78d61da75257c258cfaed37869220d32ad555eeb 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -17,30 +17,25 @@ #include #include +#include "iommu-priv.h" + static int of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) { const struct iommu_ops *ops; - struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; int ret; - ops = iommu_ops_from_fwnode(fwnode); - if ((ops && !ops->of_xlate) || - !of_device_is_available(iommu_spec->np)) + if (!of_device_is_available(iommu_spec->np)) return -ENODEV; - ret = iommu_fwspec_init(dev, fwnode, ops); + ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_spec->np)); + if (ret == -EPROBE_DEFER) + return driver_deferred_probe_check_state(dev); if (ret) return ret; - /* - * The otherwise-empty fwspec handily serves to indicate the specific - * IOMMU device we're waiting for, which will be useful if we ever get - * a proper probe-ordering dependency mechanism in future. 
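iommu_fwspec_init() now looks up the ops from the IOMMU's fwnode itself and returns -EPROBE_DEFER until that IOMMU driver has registered, so firmware-bus glue no longer carries an ops pointer at all, as the OF and ACPI call sites in this patch show. A sketch of a hypothetical translation helper following the same pattern as of_iommu_xlate() here:

        static int my_fw_iommu_xlate(struct device *dev,
                                     struct fwnode_handle *iommu_fwnode, u32 sid)
        {
                int ret;

                /* resolves ops from the fwnode; -EPROBE_DEFER until the IOMMU probes */
                ret = iommu_fwspec_init(dev, iommu_fwnode);
                if (ret)
                        return ret;

                return iommu_fwspec_add_ids(dev, &sid, 1);
        }
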
- */ - if (!ops) - return driver_deferred_probe_check_state(dev); - if (!try_module_get(ops->owner)) + ops = iommu_ops_from_fwnode(&iommu_spec->np->fwnode); + if (!ops->of_xlate || !try_module_get(ops->owner)) return -ENODEV; ret = ops->of_xlate(dev, iommu_spec); @@ -105,6 +100,14 @@ static int of_iommu_configure_device(struct device_node *master_np, of_iommu_configure_dev(master_np, dev); } +static void of_pci_check_device_ats(struct device *dev, struct device_node *np) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + + if (fwspec && of_property_read_bool(np, "ats-supported")) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; +} + /* * Returns: * 0 on success, an iommu was configured @@ -115,7 +118,6 @@ static int of_iommu_configure_device(struct device_node *master_np, int of_iommu_configure(struct device *dev, struct device_node *master_np, const u32 *id) { - struct iommu_fwspec *fwspec; int err; if (!master_np) @@ -123,14 +125,9 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np, /* Serialise to make dev->iommu stable under our potential fwspec */ mutex_lock(&iommu_probe_device_lock); - fwspec = dev_iommu_fwspec_get(dev); - if (fwspec) { - if (fwspec->ops) { - mutex_unlock(&iommu_probe_device_lock); - return 0; - } - /* In the deferred case, start again from scratch */ - iommu_fwspec_free(dev); + if (dev_iommu_fwspec_get(dev)) { + mutex_unlock(&iommu_probe_device_lock); + return 0; } /* @@ -147,23 +144,21 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np, pci_request_acs(); err = pci_for_each_dma_alias(to_pci_dev(dev), of_pci_iommu_init, &info); + of_pci_check_device_ats(dev, master_np); } else { err = of_iommu_configure_device(master_np, dev, id); } - mutex_unlock(&iommu_probe_device_lock); - if (err == -ENODEV || err == -EPROBE_DEFER) - return err; if (err) - goto err_log; + iommu_fwspec_free(dev); + mutex_unlock(&iommu_probe_device_lock); - err = iommu_probe_device(dev); - if (err) - goto err_log; - return 0; + if (!err && dev->bus) + err = iommu_probe_device(dev); + + if (err && err != -EPROBE_DEFER) + dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); -err_log: - dev_dbg(dev, "Adding to IOMMU failed: %pe\n", ERR_PTR(err)); return err; } diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 9a5196f523de5f0f8902f054b1b3722d14b18cd2..d8eaa7ea380bb0f5086c50ad4e82cb88c0c378a8 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -695,11 +695,6 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain, return size; } -static void s390_iommu_probe_finalize(struct device *dev) -{ - iommu_setup_dma_ops(dev, 0, U64_MAX); -} - struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev) { if (!zdev || !zdev->s390_domain) @@ -785,7 +780,6 @@ static const struct iommu_ops s390_iommu_ops = { .capable = s390_iommu_capable, .domain_alloc_paging = s390_domain_alloc_paging, .probe_device = s390_iommu_probe_device, - .probe_finalize = s390_iommu_probe_finalize, .release_device = s390_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = SZ_4K, diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index dd3f07384624c51b1655b3377d8dfa5cd08906da..8d8f11854676c055d5398af893453aa53b499750 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -602,6 +602,14 @@ static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova, u32 *page_table, *pte_addr; int ret = 0; + /* the IOMMU can only handle 32-bit 
addresses, both input and output */ + if ((uint64_t)paddr >> 32) { + ret = -EINVAL; + dev_warn_once(iommu->dev, + "attempt to map address beyond 4GB\n"); + goto out; + } + page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp); if (IS_ERR(page_table)) { ret = PTR_ERR(page_table); @@ -682,7 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev) if (!sun50i_domain) return NULL; - sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL, get_order(DT_SIZE)); + sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, + get_order(DT_SIZE)); if (!sun50i_domain->dt) goto err_free_domain; @@ -997,7 +1006,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev) iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev), PT_SIZE, PT_SIZE, - SLAB_HWCACHE_ALIGN, + SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA32, NULL); if (!iommu->pt_pool) return -ENOMEM; @@ -1058,6 +1067,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev) static const struct of_device_id sun50i_iommu_dt[] = { { .compatible = "allwinner,sun50i-h6-iommu", }, + { .compatible = "allwinner,sun50i-h616-iommu", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, sun50i_iommu_dt); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index f86c7ae91814ff697c8f8b63c9fc183d0e74fd08..4365d9936e68676f50da245e287005675ae1f44e 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -837,7 +837,7 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, const struct iommu_ops *ops = smmu->iommu.ops; int err; - err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops); + err = iommu_fwspec_init(dev, of_fwnode_handle(dev->of_node)); if (err < 0) { dev_err(dev, "failed to initialize fwspec: %d\n", err); return err; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 04048f64a2c0f436df56791e88eba6f9ba3e3f1f..36d680826b5733ad6c322e493562b951d2f81509 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1025,15 +1025,6 @@ static struct iommu_device *viommu_probe_device(struct device *dev) return ERR_PTR(ret); } -static void viommu_probe_finalize(struct device *dev) -{ -#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS - /* First clear the DMA ops in case we're switching from a DMA domain */ - set_dma_ops(dev, NULL); - iommu_setup_dma_ops(dev, 0, U64_MAX); -#endif -} - static void viommu_release_device(struct device *dev) { struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); @@ -1073,7 +1064,6 @@ static struct iommu_ops viommu_ops = { .capable = viommu_capable, .domain_alloc = viommu_domain_alloc, .probe_device = viommu_probe_device, - .probe_finalize = viommu_probe_finalize, .release_device = viommu_release_device, .device_group = viommu_device_group, .get_resv_regions = viommu_get_resv_regions, @@ -1261,7 +1251,6 @@ MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver virtio_iommu_drv = { .driver.name = KBUILD_MODNAME, - .driver.owner = THIS_MODULE, .id_table = id_table, .feature_table = features, .feature_table_size = ARRAY_SIZE(features), diff --git a/drivers/of/device.c b/drivers/of/device.c index 873d933e8e6d1da619ea1ae88b0b9605656ba678..dc98aae0567743e5d35a17c23ca9ad51765446fd 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -95,10 +95,8 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, { const struct bus_dma_region *map = NULL; struct device_node *bus_np; - u64 dma_start = 0; - u64 mask, end, size = 0; - bool coherent; - int iommu_ret; + u64 mask, end = 0; + bool coherent, 
set_map = false; int ret; if (np == dev->of_node) @@ -117,34 +115,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, if (!force_dma) return ret == -ENODEV ? 0 : ret; } else { - const struct bus_dma_region *r = map; - u64 dma_end = 0; - /* Determine the overall bounds of all DMA regions */ - for (dma_start = ~0; r->size; r++) { - /* Take lower and upper limits */ - if (r->dma_start < dma_start) - dma_start = r->dma_start; - if (r->dma_start + r->size > dma_end) - dma_end = r->dma_start + r->size; - } - size = dma_end - dma_start; - - /* - * Add a work around to treat the size as mask + 1 in case - * it is defined in DT as a mask. - */ - if (size & 1) { - dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n", - size); - size = size + 1; - } - - if (!size) { - dev_err(dev, "Adjusted size 0x%llx invalid\n", size); - kfree(map); - return -EINVAL; - } + end = dma_range_map_max(map); + set_map = true; } /* @@ -158,21 +131,20 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, dev->dma_mask = &dev->coherent_dma_mask; } - if (!size && dev->coherent_dma_mask) - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); - else if (!size) - size = 1ULL << 32; + if (!end && dev->coherent_dma_mask) + end = dev->coherent_dma_mask; + else if (!end) + end = (1ULL << 32) - 1; /* * Limit coherent and dma mask based on size and default mask * set by the driver. */ - end = dma_start + size - 1; mask = DMA_BIT_MASK(ilog2(end) + 1); dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ - if (!ret) { + if (set_map) { dev->bus_dma_limit = end; dev->dma_range_map = map; } @@ -181,29 +153,21 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, dev_dbg(dev, "device is%sdma coherent\n", coherent ? " " : " not "); - iommu_ret = of_iommu_configure(dev, np, id); - if (iommu_ret == -EPROBE_DEFER) { + ret = of_iommu_configure(dev, np, id); + if (ret == -EPROBE_DEFER) { /* Don't touch range map if it wasn't set from a valid dma-ranges */ - if (!ret) + if (set_map) dev->dma_range_map = NULL; kfree(map); return -EPROBE_DEFER; - } else if (iommu_ret == -ENODEV) { - dev_dbg(dev, "device is not behind an iommu\n"); - } else if (iommu_ret) { - dev_err(dev, "iommu configuration for device failed with %pe\n", - ERR_PTR(iommu_ret)); - - /* - * Historically this routine doesn't fail driver probing - * due to errors in of_iommu_configure() - */ - } else - dev_dbg(dev, "device is behind an iommu\n"); + } + /* Take all other IOMMU errors to mean we'll just carry on without it */ + dev_dbg(dev, "device is%sbehind an iommu\n", + !ret ? 
" " : " not "); - arch_setup_dma_ops(dev, dma_start, size, coherent); + arch_setup_dma_ops(dev, coherent); - if (iommu_ret) + if (ret) of_dma_set_restricted_buffer(dev, np); return 0; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 5770f3b374ece370fc6d69567043d969b68a7991..042253460b983e4c86c644883af073e7b9b0f503 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -227,7 +227,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, */ trace_swiotlb_bounced(dev, dev_addr, size); - map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs); + map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs); if (map == (phys_addr_t)DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index f4932790dae1dc068d8bf453a7cc5234cd5973f7..997eb429b0887ff8cc9feb9d04f3dfb5d2ca4a3e 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -9,8 +9,13 @@ #ifndef __ACPI_BUS_H__ #define __ACPI_BUS_H__ +#include +#include #include +#include +#include #include +#include /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 @@ -122,8 +127,8 @@ static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile( } struct acpi_scan_handler { - const struct acpi_device_id *ids; struct list_head list_node; + const struct acpi_device_id *ids; bool (*match)(const char *idstr, const struct acpi_device_id **matchid); int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id); void (*detach)(struct acpi_device *dev); @@ -137,11 +142,15 @@ struct acpi_scan_handler { * -------------------- */ +typedef int (*acpi_hp_notify) (struct acpi_device *, u32); +typedef void (*acpi_hp_uevent) (struct acpi_device *, u32); +typedef void (*acpi_hp_fixup) (struct acpi_device *); + struct acpi_hotplug_context { struct acpi_device *self; - int (*notify)(struct acpi_device *, u32); - void (*uevent)(struct acpi_device *, u32); - void (*fixup)(struct acpi_device *); + acpi_hp_notify notify; + acpi_hp_uevent uevent; + acpi_hp_fixup fixup; }; /* @@ -267,6 +276,7 @@ struct acpi_device_power_flags { }; struct acpi_device_power_state { + struct list_head resources; /* Power resources referenced */ struct { u8 valid:1; u8 explicit_set:1; /* _PSx present? 
*/ @@ -274,7 +284,6 @@ struct acpi_device_power_state { } flags; int power; /* % Power (compared to D0) */ int latency; /* Dx->D0 time (microseconds) */ - struct list_head resources; /* Power resources referenced */ }; struct acpi_device_power { @@ -342,16 +351,16 @@ struct acpi_device_wakeup { }; struct acpi_device_physical_node { - unsigned int node_id; struct list_head node; struct device *dev; + unsigned int node_id; bool put_online:1; }; struct acpi_device_properties { + struct list_head list; const guid_t *guid; union acpi_object *properties; - struct list_head list; void **bufs; }; @@ -413,12 +422,12 @@ struct acpi_device { /* Non-device subnode */ struct acpi_data_node { + struct list_head sibling; const char *name; acpi_handle handle; struct fwnode_handle fwnode; struct fwnode_handle *parent; struct acpi_device_data data; - struct list_head sibling; struct kobject kobj; struct completion kobj_done; }; @@ -503,8 +512,7 @@ static inline void acpi_set_hp_context(struct acpi_device *adev, void acpi_initialize_hp_context(struct acpi_device *adev, struct acpi_hotplug_context *hp, - int (*notify)(struct acpi_device *, u32), - void (*uevent)(struct acpi_device *, u32)); + acpi_hp_notify notify, acpi_hp_uevent uevent); /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; @@ -649,8 +657,7 @@ struct iommu_ops; bool acpi_dma_supported(const struct acpi_device *adev); enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_iommu_fwspec_init(struct device *dev, u32 id, - struct fwnode_handle *fwnode, - const struct iommu_ops *ops); + struct fwnode_handle *fwnode); int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map); int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, const u32 *input_id); diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 1cb65592c95dd3376e6ca66b629bb69bb11a5dd7..d4ed5622cf2b02060e6f748177e6286afff094e2 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -39,7 +39,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head); /* IOMMU interface */ -int iort_dma_get_ranges(struct device *dev, u64 *size); +int iort_dma_get_ranges(struct device *dev, u64 *limit); int iort_iommu_configure_id(struct device *dev, const u32 *id_in); void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head); phys_addr_t acpi_iort_dma_get_max_cpu_address(void); @@ -55,7 +55,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *hea static inline void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { } /* IOMMU interface */ -static inline int iort_dma_get_ranges(struct device *dev, u64 *size) +static inline int iort_dma_get_ranges(struct device *dev, u64 *limit) { return -ENODEV; } static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) { return -ENODEV; } diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 18aade195884d7b3e3899dc89175e300f58434e0..185ef49b2a913a99b8c4b676426c20680176bc7f 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -49,6 +49,24 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev, return (phys_addr_t)-1; } +static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map) +{ + dma_addr_t ret = (dma_addr_t)U64_MAX; + + for (; map->size; map++) + ret = min(ret, map->dma_start); + return ret; +} + 
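Together with dma_range_map_max() added just below, this helper derives the overall bounds of a multi-entry dma-ranges translation without open-coding the loop. A small illustration with made-up window values:

        /* two translation windows; the zero-sized entry terminates the walk */
        static const struct bus_dma_region example_map[] = {
                { .dma_start = 0x0,        .size = 0x40000000 },
                { .dma_start = 0x80000000, .size = 0x80000000 },
                { }
        };

        /*
         * dma_range_map_min(example_map) == 0x0
         * dma_range_map_max(example_map) == 0xffffffff (last addressable DMA byte)
         */
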
+static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map) +{ + dma_addr_t ret = 0; + + for (; map->size; map++) + ret = max(ret, map->dma_start + map->size - 1); + return ret; +} + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include #ifndef phys_to_dma_unencrypted diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 3888a21cf4ffdb59089810a7f57ce8e4854cfc2d..cc848e7380f7f35632f1d097a2feaa52b52cb440 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -433,11 +433,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, #endif #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent); +void arch_setup_dma_ops(struct device *dev, bool coherent); #else -static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, bool coherent) +static inline void arch_setup_dma_ops(struct device *dev, bool coherent) { } #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ @@ -520,17 +518,10 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, #if defined CONFIG_PCI && defined CONFIG_X86 extern bool is_zhaoxin_kh40000; -extern const struct dma_map_ops kh40000_dma_direct_ops; -void kh40000_set_iommu_dma_ops(struct device *dev); #else bool __weak is_zhaoxin_kh40000; -static inline void kh40000_set_iommu_dma_ops(struct device *dev) -{ - -} - #endif diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 86cf1f7ae389a40180b86dd6850102f6fe04c188..f9a81761bfceda1a3b5175c661d20e7de76b88ab 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -85,6 +85,8 @@ struct io_pgtable_cfg { * * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability * attributes set in the TCR for a non-coherent page-table walker. + * + * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable. */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) @@ -92,6 +94,7 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) + #define IO_PGTABLE_QUIRK_ARM_HD BIT(7) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 4edcc5a979b2db5e1fb395414b221a8e9e6e5807..32d0d7f7cb08e668376b648955a945469621bdcc 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -124,12 +124,16 @@ struct iopf_fault { struct iopf_group { struct iopf_fault last_fault; struct list_head faults; + size_t fault_count; /* list node for iommu_fault_param::faults */ struct list_head pending_node; struct work_struct work; - struct iommu_domain *domain; + struct iommu_attach_handle *attach_handle; /* The device's fault data parameter. */ struct iommu_fault_param *fault_param; + /* Used by handler provider to hook the group on its own lists. */ + struct list_head node; + u32 cookie; }; /** @@ -555,6 +559,10 @@ static inline int __iommu_copy_struct_from_user_array( * @default_domain: If not NULL this will always be set as the default domain. * This should be an IDENTITY/BLOCKED/PLATFORM domain. * Do not use in new drivers. + * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is + * no user domain for each PASID and the I/O page faults are + * forwarded through the user domain attached to the device + * RID. 
*/ struct iommu_ops { bool (*capable)(struct device *dev, enum iommu_cap); @@ -598,6 +606,7 @@ struct iommu_ops { struct iommu_domain *blocked_domain; struct iommu_domain *release_domain; struct iommu_domain *default_domain; + u8 user_pasid_table:1; CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) @@ -797,6 +806,7 @@ extern bool iommu_present(const struct bus_type *bus); extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap); extern bool iommu_group_has_isolated_msi(struct iommu_group *group); extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus); +struct iommu_domain *iommu_paging_domain_alloc(struct device *dev); extern void iommu_domain_free(struct iommu_domain *domain); extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); @@ -1000,7 +1010,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev); /** * struct iommu_fwspec - per-device IOMMU instance data - * @ops: ops for this device's IOMMU * @iommu_fwnode: firmware handle for this device's IOMMU * @flags: IOMMU_FWSPEC_* flags * @num_ids: number of associated device IDs @@ -1011,7 +1020,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev); * consumers. */ struct iommu_fwspec { - const struct iommu_ops *ops; struct fwnode_handle *iommu_fwnode; u32 flags; unsigned int num_ids; @@ -1021,27 +1029,33 @@ struct iommu_fwspec { /* ATS is supported */ #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) +/* + * An iommu attach handle represents a relationship between an iommu domain + * and a PASID or RID of a device. It is allocated and managed by the component + * that manages the domain and is stored in the iommu group during the time the + * domain is attached. + */ +struct iommu_attach_handle { + struct iommu_domain *domain; +}; + /** * struct iommu_sva - handle to a device-mm bond */ struct iommu_sva { + struct iommu_attach_handle handle; struct device *dev; - struct iommu_domain *domain; - struct list_head handle_item; refcount_t users; }; struct iommu_mm_data { u32 pasid; struct list_head sva_domains; - struct list_head sva_handles; }; -int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, - const struct iommu_ops *ops); +int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids); -const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode); static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { @@ -1084,12 +1098,10 @@ int iommu_device_claim_dma_owner(struct device *dev, void *owner); void iommu_device_release_dma_owner(struct device *dev); int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid); + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle); void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); -struct iommu_domain * -iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, - unsigned int type); ioasid_t iommu_alloc_global_pasid(struct device *dev); void iommu_free_global_pasid(ioasid_t pasid); #else /* CONFIG_IOMMU_API */ @@ -1118,6 +1130,11 @@ static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus return NULL; } +static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + static inline void iommu_domain_free(struct iommu_domain *domain) 
{ } @@ -1347,8 +1364,7 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) } static inline int iommu_fwspec_init(struct device *dev, - struct fwnode_handle *iommu_fwnode, - const struct iommu_ops *ops) + struct fwnode_handle *iommu_fwnode) { return -ENODEV; } @@ -1363,12 +1379,6 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, return -ENODEV; } -static inline -const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) -{ - return NULL; -} - static inline int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) { @@ -1420,7 +1430,8 @@ static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner) } static inline int iommu_attach_device_pasid(struct iommu_domain *domain, - struct device *dev, ioasid_t pasid) + struct device *dev, ioasid_t pasid, + struct iommu_attach_handle *handle) { return -ENODEV; } @@ -1430,13 +1441,6 @@ static inline void iommu_detach_device_pasid(struct iommu_domain *domain, { } -static inline struct iommu_domain * -iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, - unsigned int type) -{ - return NULL; -} - static inline ioasid_t iommu_alloc_global_pasid(struct device *dev) { return IOMMU_PASID_INVALID; @@ -1480,9 +1484,6 @@ static inline void iommu_debugfs_setup(void) {} #ifdef CONFIG_IOMMU_DMA #include -/* Setup call for arch DMA mapping code */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); - int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr); @@ -1493,10 +1494,6 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg); struct msi_desc; struct msi_msg; -static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) -{ -} - static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) { return -ENODEV; @@ -1566,8 +1563,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm); void iommu_sva_unbind_device(struct iommu_sva *handle); u32 iommu_sva_get_pasid(struct iommu_sva *handle); -struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, - struct mm_struct *mm); #else static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) @@ -1592,12 +1587,6 @@ static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) } static inline void mm_pasid_drop(struct mm_struct *mm) {} - -static inline struct iommu_domain * -iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm) -{ - return NULL; -} #endif /* CONFIG_IOMMU_SVA */ #ifdef CONFIG_IOMMU_IOPF diff --git a/include/linux/iova.h b/include/linux/iova.h index 83c00fac2acb1d952edb3d82963b500f4d803047..d2c4fd923efabcb61f6a8d6e190cbea6b8dda37a 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -65,6 +65,11 @@ static inline size_t iova_align(struct iova_domain *iovad, size_t size) return ALIGN(size, iovad->granule); } +static inline size_t iova_align_down(struct iova_domain *iovad, size_t size) +{ + return ALIGN_DOWN(size, iovad->granule); +} + static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) { return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ecde0312dd5203a125e21846235ab5b1acae6cb7..05e6f1b3474ee878e674e8c551f94fa4c122ed00 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -43,7 +43,7 @@ int 
swiotlb_init_late(size_t size, gfp_t gfp_mask, extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, - size_t mapping_size, size_t alloc_size, + size_t mapping_size, unsigned int alloc_aligned_mask, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index 1dfeaa2e649ee41751162073463df20cb0a130ad..ede2b464a7619ad24d0cb06a9fe6f0c63f64285b 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -50,6 +50,7 @@ enum { IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, IOMMUFD_CMD_HWPT_INVALIDATE, + IOMMUFD_CMD_FAULT_QUEUE_ALLOC, }; /** @@ -356,10 +357,13 @@ struct iommu_vfio_ioas { * the parent HWPT in a nesting configuration. * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is * enforced on device attachment + * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is + * valid. */ enum iommufd_hwpt_alloc_flags { IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, + IOMMU_HWPT_FAULT_ID_VALID = 1 << 2, }; /** @@ -411,6 +415,9 @@ enum iommu_hwpt_data_type { * @data_type: One of enum iommu_hwpt_data_type * @data_len: Length of the type specific data * @data_uptr: User pointer to the type specific data + * @fault_id: The ID of IOMMUFD_FAULT object. Valid only if flags field of + * IOMMU_HWPT_FAULT_ID_VALID is set. + * @__reserved2: Padding to 64-bit alignment. Must be 0. * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the @@ -441,6 +448,8 @@ struct iommu_hwpt_alloc { __u32 data_type; __u32 data_len; __aligned_u64 data_uptr; + __u32 fault_id; + __u32 __reserved2; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) @@ -692,4 +701,104 @@ struct iommu_hwpt_invalidate { __u32 __reserved; }; #define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE) + +/** + * enum iommu_hwpt_pgfault_flags - flags for struct iommu_hwpt_pgfault + * @IOMMU_PGFAULT_FLAGS_PASID_VALID: The pasid field of the fault data is + * valid. + * @IOMMU_PGFAULT_FLAGS_LAST_PAGE: It's the last fault of a fault group. + */ +enum iommu_hwpt_pgfault_flags { + IOMMU_PGFAULT_FLAGS_PASID_VALID = (1 << 0), + IOMMU_PGFAULT_FLAGS_LAST_PAGE = (1 << 1), +}; + +/** + * enum iommu_hwpt_pgfault_perm - perm bits for struct iommu_hwpt_pgfault + * @IOMMU_PGFAULT_PERM_READ: request for read permission + * @IOMMU_PGFAULT_PERM_WRITE: request for write permission + * @IOMMU_PGFAULT_PERM_EXEC: (PCIE 10.4.1) request with a PASID that has the + * Execute Requested bit set in PASID TLP Prefix. + * @IOMMU_PGFAULT_PERM_PRIV: (PCIE 10.4.1) request with a PASID that has the + * Privileged Mode Requested bit set in PASID TLP + * Prefix. + */ +enum iommu_hwpt_pgfault_perm { + IOMMU_PGFAULT_PERM_READ = (1 << 0), + IOMMU_PGFAULT_PERM_WRITE = (1 << 1), + IOMMU_PGFAULT_PERM_EXEC = (1 << 2), + IOMMU_PGFAULT_PERM_PRIV = (1 << 3), +}; + +/** + * struct iommu_hwpt_pgfault - iommu page fault data + * @flags: Combination of enum iommu_hwpt_pgfault_flags + * @dev_id: id of the originated device + * @pasid: Process Address Space ID + * @grpid: Page Request Group Index + * @perm: Combination of enum iommu_hwpt_pgfault_perm + * @addr: Fault address + * @length: a hint of how much data the requestor is expecting to fetch. 
For + * example, if the PRI initiator knows it is going to do a 10MB + * transfer, it could fill in 10MB and the OS could pre-fault in + * 10MB of IOVA. It's default to 0 if there's no such hint. + * @cookie: kernel-managed cookie identifying a group of fault messages. The + * cookie number encoded in the last page fault of the group should + * be echoed back in the response message. + */ +struct iommu_hwpt_pgfault { + __u32 flags; + __u32 dev_id; + __u32 pasid; + __u32 grpid; + __u32 perm; + __u64 addr; + __u32 length; + __u32 cookie; +}; + +/** + * enum iommufd_page_response_code - Return status of fault handlers + * @IOMMUFD_PAGE_RESP_SUCCESS: Fault has been handled and the page tables + * populated, retry the access. This is the + * "Success" defined in PCI 10.4.2.1. + * @IOMMUFD_PAGE_RESP_INVALID: Could not handle this fault, don't retry the + * access. This is the "Invalid Request" in PCI + * 10.4.2.1. + * @IOMMUFD_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from + * this device if possible. This is the "Response + * Failure" in PCI 10.4.2.1. + */ +enum iommufd_page_response_code { + IOMMUFD_PAGE_RESP_SUCCESS = 0, + IOMMUFD_PAGE_RESP_INVALID, + IOMMUFD_PAGE_RESP_FAILURE, +}; + +/** + * struct iommu_hwpt_page_response - IOMMU page fault response + * @cookie: The kernel-managed cookie reported in the fault message. + * @code: One of response code in enum iommufd_page_response_code. + */ +struct iommu_hwpt_page_response { + __u32 cookie; + __u32 code; +}; + +/** + * struct iommu_fault_alloc - ioctl(IOMMU_FAULT_QUEUE_ALLOC) + * @size: sizeof(struct iommu_fault_alloc) + * @flags: Must be 0 + * @out_fault_id: The ID of the new FAULT + * @out_fault_fd: The fd of the new FAULT + * + * Explicitly allocate a fault handling object. + */ +struct iommu_fault_alloc { + __u32 size; + __u32 flags; + __u32 out_fault_id; + __u32 out_fault_fd; +}; +#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC) #endif diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index 0ff132dd86a8dd683fcd27e97580ddb9f4ffa3b9..5bc41e33fa5c78c2f5a46fd3486a22bd5d716374 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -227,9 +227,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit) dma_numa_cma_reserve(); - if (is_zhaoxin_kh40000) - return; - pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { diff --git a/kernel/dma/phytium/pswiotlb-iommu.c b/kernel/dma/phytium/pswiotlb-iommu.c index 0dcde77249dd564e2c214c5294379bedd22f0a32..4c3f6b9ac9f1ef42fd522fdafdc06409f1a4f111 100644 --- a/kernel/dma/phytium/pswiotlb-iommu.c +++ b/kernel/dma/phytium/pswiotlb-iommu.c @@ -768,7 +768,7 @@ dma_addr_t pswiotlb_iommu_dma_map_page(struct device *dev, struct page *page, } aligned_size = iova_align(iovad, size); - phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size, + phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir, attrs); if (phys == DMA_MAPPING_ERROR) diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index e7c3fbd0737ec0f34f28206bc91c47cdaed9e3c1..2c6cbeab80213d51f99b781267d7443342a49f56 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1298,15 +1298,40 @@ static unsigned long mem_used(struct io_tlb_mem *mem) #endif /* CONFIG_DEBUG_FS */ +/** + * swiotlb_tbl_map_single() - bounce buffer map a single contiguous physical area + * @dev: Device which maps the buffer. 
+ * @orig_addr: Original (non-bounced) physical IO buffer address + * @mapping_size: Requested size of the actual bounce buffer, excluding + * any pre- or post-padding for alignment + * @alloc_align_mask: Required start and end alignment of the allocated buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the map operation + * + * Find and allocate a suitable sequence of IO TLB slots for the request. + * The allocated space starts at an alignment specified by alloc_align_mask, + * and the size of the allocated space is rounded up so that the total amount + * of allocated space is a multiple of (alloc_align_mask + 1). If + * alloc_align_mask is zero, the allocated space may be at any alignment and + * the size is not rounded up. + * + * The returned address is within the allocated space and matches the bits + * of orig_addr that are specified in the DMA min_align_mask for the device. As + * such, this returned address may be offset from the beginning of the allocated + * space. The bounce buffer space starting at the returned address for + * mapping_size bytes is initialized to the contents of the original IO buffer + * area. Any pre-padding (due to an offset) and any post-padding (due to + * rounding-up the size) is not initialized. + */ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, - size_t mapping_size, size_t alloc_size, - unsigned int alloc_align_mask, enum dma_data_direction dir, - unsigned long attrs) + size_t mapping_size, unsigned int alloc_align_mask, + enum dma_data_direction dir, unsigned long attrs) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; unsigned int offset; struct io_tlb_pool *pool; unsigned int i; + size_t size; int index; phys_addr_t tlb_addr; unsigned short pad_slots; @@ -1320,20 +1345,24 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); - if (mapping_size > alloc_size) { - dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)", - mapping_size, alloc_size); - return (phys_addr_t)DMA_MAPPING_ERROR; - } + /* + * The default swiotlb memory pool is allocated with PAGE_SIZE + * alignment. If a mapping is requested with larger alignment, + * the mapping may be unable to use the initial slot(s) in all + * sets of IO_TLB_SEGSIZE slots. In such case, a mapping request + * of or near the maximum mapping size would always fail. 
+ */ + dev_WARN_ONCE(dev, alloc_align_mask > ~PAGE_MASK, + "Alloc alignment may prevent fulfilling requests with max mapping_size\n"); offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr); - index = swiotlb_find_slots(dev, orig_addr, - alloc_size + offset, alloc_align_mask, &pool); + size = ALIGN(mapping_size + offset, alloc_align_mask + 1); + index = swiotlb_find_slots(dev, orig_addr, size, alloc_align_mask, &pool); if (index == -1) { if (!(attrs & DMA_ATTR_NO_WARN)) dev_warn_ratelimited(dev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", - alloc_size, mem->nslabs, mem_used(mem)); + size, mem->nslabs, mem_used(mem)); return (phys_addr_t)DMA_MAPPING_ERROR; } @@ -1346,7 +1375,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, offset &= (IO_TLB_SIZE - 1); index += pad_slots; pool->slots[index].pad_slots = pad_slots; - for (i = 0; i < nr_slots(alloc_size + offset); i++) + for (i = 0; i < (nr_slots(size) - pad_slots); i++) pool->slots[index + i].orig_addr = slot_addr(orig_addr, i); tlb_addr = slot_addr(pool->start, index) + offset; /* @@ -1498,8 +1527,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size); - swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir, - attrs); + swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs); if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR) return DMA_MAPPING_ERROR; diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index c8d0f13c91ddecb219c4f5879c59e596b17da50a..4927b9add5add913c955d2f63b884dd0df81ffcb 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -279,6 +279,9 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested) uint32_t parent_hwpt_id = 0; uint32_t parent_hwpt_id_not_work = 0; uint32_t test_hwpt_id = 0; + uint32_t iopf_hwpt_id; + uint32_t fault_id; + uint32_t fault_fd; if (self->device_id) { /* Negative tests */ @@ -326,6 +329,7 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested) sizeof(data)); /* Allocate two nested hwpts sharing one common parent hwpt */ + test_ioctl_fault_alloc(&fault_id, &fault_fd); test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0, &nested_hwpt_id[0], IOMMU_HWPT_DATA_SELFTEST, &data, @@ -334,6 +338,14 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested) &nested_hwpt_id[1], IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data)); + test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id, + UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID, + &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, + &data, sizeof(data)); + test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id, + IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id, + IOMMU_HWPT_DATA_SELFTEST, &data, + sizeof(data)); test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], IOMMU_TEST_IOTLB_DEFAULT); test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], @@ -504,14 +516,24 @@ TEST_F(iommufd_ioas, alloc_hwpt_nested) _test_ioctl_destroy(self->fd, nested_hwpt_id[1])); test_ioctl_destroy(nested_hwpt_id[0]); + /* Switch from nested_hwpt_id[1] to iopf_hwpt_id */ + test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id); + EXPECT_ERRNO(EBUSY, + _test_ioctl_destroy(self->fd, iopf_hwpt_id)); + /* Trigger an IOPF on the device */ + test_cmd_trigger_iopf(self->device_id, fault_fd); + /* Detach from nested_hwpt_id[1] and destroy it */ test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id); test_ioctl_destroy(nested_hwpt_id[1]); + 
test_ioctl_destroy(iopf_hwpt_id); /* Detach from the parent hw_pagetable and destroy it */ test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id); test_ioctl_destroy(parent_hwpt_id); test_ioctl_destroy(parent_hwpt_id_not_work); + close(fault_fd); + test_ioctl_destroy(fault_id); } else { test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0, &parent_hwpt_id); @@ -1722,10 +1744,17 @@ FIXTURE_VARIANT(iommufd_dirty_tracking) FIXTURE_SETUP(iommufd_dirty_tracking) { + unsigned long size; int mmap_flags; void *vrc; int rc; + if (variant->buffer_size < MOCK_PAGE_SIZE) { + SKIP(return, + "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu", + variant->buffer_size, MOCK_PAGE_SIZE); + } + self->fd = open("/dev/iommu", O_RDWR); ASSERT_NE(-1, self->fd); @@ -1749,12 +1778,11 @@ FIXTURE_SETUP(iommufd_dirty_tracking) assert(vrc == self->buffer); self->page_size = MOCK_PAGE_SIZE; - self->bitmap_size = - variant->buffer_size / self->page_size / BITS_PER_BYTE; + self->bitmap_size = variant->buffer_size / self->page_size; /* Provision with an extra (PAGE_SIZE) for the unaligned case */ - rc = posix_memalign(&self->bitmap, PAGE_SIZE, - self->bitmap_size + PAGE_SIZE); + size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE); + rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE); assert(!rc); assert(self->bitmap); assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); @@ -1775,51 +1803,63 @@ FIXTURE_SETUP(iommufd_dirty_tracking) FIXTURE_TEARDOWN(iommufd_dirty_tracking) { munmap(self->buffer, variant->buffer_size); - munmap(self->bitmap, self->bitmap_size); + munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE)); teardown_iommufd(self->fd, _metadata); } -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k) +{ + /* half of an u8 index bitmap */ + .buffer_size = 8UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k) +{ + /* one u8 index bitmap */ + .buffer_size = 16UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k) { /* one u32 index bitmap */ - .buffer_size = 128UL * 1024UL, + .buffer_size = 64UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) { /* one u64 index bitmap */ - .buffer_size = 256UL * 1024UL, + .buffer_size = 128UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k) { /* two u64 index and trailing end bitmap */ - .buffer_size = 640UL * 1024UL, + .buffer_size = 320UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M) { - /* 4K bitmap (128M IOVA range) */ - .buffer_size = 128UL * 1024UL * 1024UL, + /* 4K bitmap (64M IOVA range) */ + .buffer_size = 64UL * 1024UL * 1024UL, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge) { - /* 4K bitmap (128M IOVA range) */ - .buffer_size = 128UL * 1024UL * 1024UL, + /* 4K bitmap (64M IOVA range) */ + .buffer_size = 64UL * 1024UL * 1024UL, .hugepages = true, }; -FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) { - /* 8K bitmap (256M IOVA range) */ - .buffer_size = 256UL * 1024UL * 1024UL, + /* 8K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, }; 
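An editorial note on the arithmetic the reworked dirty-tracking fixture above now relies on (not part of the patch): bitmap_size is a count of bits, one per mock page of the buffer, and the byte allocation is derived from it with DIV_ROUND_UP(bitmap_size, BITS_PER_BYTE). The sketch below checks that this matches the variant comments; the 2 KiB mock page size is inferred from those comments ("one u32 index bitmap" for 64k, "8K bitmap" for 128M) and is an assumption here, as are all EXAMPLE_* names.

#include <assert.h>

#define EXAMPLE_MOCK_PAGE_SIZE	2048UL	/* assumed mock page size */
#define EXAMPLE_BITS_PER_BYTE	8UL
#define EXAMPLE_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void example_dirty_bitmap_sizes(void)
{
	unsigned long nbits, nbytes;

	/* domain_dirty64k: 64 KiB / 2 KiB = 32 pages -> one u32 of dirty bits. */
	nbits = (64UL * 1024UL) / EXAMPLE_MOCK_PAGE_SIZE;
	nbytes = EXAMPLE_DIV_ROUND_UP(nbits, EXAMPLE_BITS_PER_BYTE);
	assert(nbits == 32 && nbytes == 4);

	/* domain_dirty128M: 128 MiB / 2 KiB = 65536 pages -> 8 KiB of bitmap. */
	nbits = (128UL * 1024UL * 1024UL) / EXAMPLE_MOCK_PAGE_SIZE;
	nbytes = EXAMPLE_DIV_ROUND_UP(nbits, EXAMPLE_BITS_PER_BYTE);
	assert(nbits == 65536 && nbytes == 8192);
}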
-FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M_huge) +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge) { - /* 8K bitmap (256M IOVA range) */ - .buffer_size = 256UL * 1024UL * 1024UL, + /* 8K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, .hugepages = true, }; diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index f590417cd67a95bf31065f9b7682bc7c627ebf3a..c5d5e69452b010946dc42e2f0dc1a71c0a0480c7 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -615,7 +615,7 @@ TEST_FAIL_NTH(basic_fail_nth, device) if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL)) return -1; - if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id, + if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, 0, &hwpt_id, IOMMU_HWPT_DATA_NONE, 0, 0)) return -1; diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index 8d2b46b2114da814f75740992c0dc4b1be14d33b..40f6f14ce136f095abc7ee587dc3063dccd49fdd 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -22,6 +22,8 @@ #define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / __BITS_PER_LONG) +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) + static inline void set_bit(unsigned int nr, unsigned long *addr) { unsigned long mask = BIT_MASK(nr); @@ -153,7 +155,7 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, EXPECT_ERRNO(_errno, _test_cmd_mock_domain_replace(self->fd, stdev_id, \ pt_id, NULL)) -static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, +static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, __u32 ft_id, __u32 flags, __u32 *hwpt_id, __u32 data_type, void *data, size_t data_len) { @@ -165,6 +167,7 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, .data_type = data_type, .data_len = data_len, .data_uptr = (uint64_t)data, + .fault_id = ft_id, }; int ret; @@ -177,24 +180,36 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, } #define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \ - ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \ hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, \ 0)) #define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \ EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc( \ - self->fd, device_id, pt_id, flags, \ + self->fd, device_id, pt_id, 0, flags, \ hwpt_id, IOMMU_HWPT_DATA_NONE, NULL, 0)) #define test_cmd_hwpt_alloc_nested(device_id, pt_id, flags, hwpt_id, \ data_type, data, data_len) \ - ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \ hwpt_id, data_type, data, data_len)) #define test_err_hwpt_alloc_nested(_errno, device_id, pt_id, flags, hwpt_id, \ data_type, data, data_len) \ EXPECT_ERRNO(_errno, \ - _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, flags, \ + _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, 0, flags, \ hwpt_id, data_type, data, data_len)) +#define test_cmd_hwpt_alloc_iopf(device_id, pt_id, fault_id, flags, hwpt_id, \ + data_type, data, data_len) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \ + flags, hwpt_id, data_type, data, \ + 
data_len)) +#define test_err_hwpt_alloc_iopf(_errno, device_id, pt_id, fault_id, flags, \ + hwpt_id, data_type, data, data_len) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, fault_id, \ + flags, hwpt_id, data_type, data, \ + data_len)) + #define test_cmd_hwpt_check_iotlb(hwpt_id, iotlb_id, expected) \ ({ \ struct iommu_test_cmd test_cmd = { \ @@ -346,12 +361,12 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, __u64 iova, size_t page_size, size_t pte_page_size, __u64 *bitmap, - __u64 bitmap_size, __u32 flags, + __u64 nbits, __u32 flags, struct __test_metadata *_metadata) { unsigned long npte = pte_page_size / page_size, pteset = 2 * npte; - unsigned long nbits = bitmap_size * BITS_PER_BYTE; unsigned long j, i, nr = nbits / pteset ?: 1; + unsigned long bitmap_size = DIV_ROUND_UP(nbits, BITS_PER_BYTE); __u64 out_dirty = 0; /* Mark all even bits as dirty in the mock domain */ @@ -684,3 +699,66 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, #define test_cmd_get_hw_capabilities(device_id, caps, mask) \ ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps)) + +static int _test_ioctl_fault_alloc(int fd, __u32 *fault_id, __u32 *fault_fd) +{ + struct iommu_fault_alloc cmd = { + .size = sizeof(cmd), + }; + int ret; + + ret = ioctl(fd, IOMMU_FAULT_QUEUE_ALLOC, &cmd); + if (ret) + return ret; + *fault_id = cmd.out_fault_id; + *fault_fd = cmd.out_fault_fd; + return 0; +} + +#define test_ioctl_fault_alloc(fault_id, fault_fd) \ + ({ \ + ASSERT_EQ(0, _test_ioctl_fault_alloc(self->fd, fault_id, \ + fault_fd)); \ + ASSERT_NE(0, *(fault_id)); \ + ASSERT_NE(0, *(fault_fd)); \ + }) + +static int _test_cmd_trigger_iopf(int fd, __u32 device_id, __u32 fault_fd) +{ + struct iommu_test_cmd trigger_iopf_cmd = { + .size = sizeof(trigger_iopf_cmd), + .op = IOMMU_TEST_OP_TRIGGER_IOPF, + .trigger_iopf = { + .dev_id = device_id, + .pasid = 0x1, + .grpid = 0x2, + .perm = IOMMU_PGFAULT_PERM_READ | IOMMU_PGFAULT_PERM_WRITE, + .addr = 0xdeadbeaf, + }, + }; + struct iommu_hwpt_page_response response = { + .code = IOMMUFD_PAGE_RESP_SUCCESS, + }; + struct iommu_hwpt_pgfault fault = {}; + ssize_t bytes; + int ret; + + ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_TRIGGER_IOPF), &trigger_iopf_cmd); + if (ret) + return ret; + + bytes = read(fault_fd, &fault, sizeof(fault)); + if (bytes <= 0) + return -EIO; + + response.cookie = fault.cookie; + + bytes = write(fault_fd, &response, sizeof(response)); + if (bytes <= 0) + return -EIO; + + return 0; +} + +#define test_cmd_trigger_iopf(device_id, fault_fd) \ + ASSERT_EQ(0, _test_cmd_trigger_iopf(self->fd, device_id, fault_fd))
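To round off the new ABI exercised by the selftest helpers above, here is a minimal userspace sketch (not part of the patch) of the fault-queue flow introduced in this series: allocate a fault object with IOMMU_FAULT_QUEUE_ALLOC, bind it to a HWPT by passing fault_id together with IOMMU_HWPT_FAULT_ID_VALID, then read() iommu_hwpt_pgfault records from the fault fd and write() back an iommu_hwpt_page_response echoing the cookie. dev_id and parent_hwpt_id are placeholders, the read() blocks until a fault is actually reported, and whether a given data_type/fault_id combination is accepted depends on the IOMMU driver, so treat this strictly as an outline.

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int example_handle_one_fault(int iommufd, uint32_t dev_id,
				    uint32_t parent_hwpt_id)
{
	struct iommu_fault_alloc fault = { .size = sizeof(fault) };
	struct iommu_hwpt_alloc hwpt = {
		.size = sizeof(hwpt),
		.flags = IOMMU_HWPT_FAULT_ID_VALID,
		.dev_id = dev_id,
		.pt_id = parent_hwpt_id,
		.data_type = IOMMU_HWPT_DATA_NONE,
	};
	struct iommu_hwpt_pgfault pgfault;
	struct iommu_hwpt_page_response resp = {
		.code = IOMMUFD_PAGE_RESP_SUCCESS,
	};

	/* 1. Allocate the fault object; the kernel returns an ID and an fd. */
	if (ioctl(iommufd, IOMMU_FAULT_QUEUE_ALLOC, &fault))
		return -1;

	/* 2. Make the new HWPT fault-capable by referencing the fault ID. */
	hwpt.fault_id = fault.out_fault_id;
	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &hwpt))
		return -1;

	/* 3. Page faults arrive as fixed-size iommu_hwpt_pgfault records. */
	if (read(fault.out_fault_fd, &pgfault, sizeof(pgfault)) !=
	    (ssize_t)sizeof(pgfault))
		return -1;

	/* 4. Respond, echoing the cookie carried by the group's last fault. */
	resp.cookie = pgfault.cookie;
	if (write(fault.out_fault_fd, &resp, sizeof(resp)) !=
	    (ssize_t)sizeof(resp))
		return -1;

	return 0;
}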