diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 2ecee01222d49306b703dd674e58e83bfc72f7ef..802c8d9152296c4630991a64c52ca041e0502dbe 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -501,4 +501,13 @@ config SPRD_IOMMU
 
 	  Say Y here if you want to use the multimedia devices listed above.
 
+config SMMU_BYPASS_DEV
+	bool "SMMU bypass streams for some specific devices"
+	depends on ARM_SMMU_V3=y
+	help
+	  Based on the smmu.bypassdev= command line option, the SMMU performs
+	  attribute transformation only, with no address translation, for the
+	  listed devices. E.g. iMR3408/3416 RAID controllers can bypass the
+	  SMMU in the default DMA domain to support virtualization of other devices.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index d0422bb137247c3e5cd75936fdb6bb09ac9af6c1..802774304c13f762025d9be9d6fac771c16d1096 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -41,6 +41,42 @@ module_param(disable_msipolling, bool, 0444);
 MODULE_PARM_DESC(disable_msipolling,
 	"Disable MSI-based polling for CMD_SYNC completion.");
 
+#ifdef CONFIG_SMMU_BYPASS_DEV
+struct smmu_bypass_device {
+	unsigned short vendor;
+	unsigned short device;
+};
+#define MAX_CMDLINE_SMMU_BYPASS_DEV 16
+
+static struct smmu_bypass_device smmu_bypass_devices[MAX_CMDLINE_SMMU_BYPASS_DEV];
+static int smmu_bypass_devices_num;
+
+static int __init arm_smmu_bypass_dev_setup(char *str)
+{
+	unsigned short vendor;
+	unsigned short device;
+	int ret;
+
+	if (!str)
+		return -EINVAL;
+
+	ret = sscanf(str, "%hx:%hx", &vendor, &device);
+	if (ret != 2)
+		return -EINVAL;
+
+	if (smmu_bypass_devices_num >= MAX_CMDLINE_SMMU_BYPASS_DEV)
+		return -ERANGE;
+
+	smmu_bypass_devices[smmu_bypass_devices_num].vendor = vendor;
+	smmu_bypass_devices[smmu_bypass_devices_num].device = device;
+	smmu_bypass_devices_num++;
+
+	return 0;
+}
+
+__setup("smmu.bypassdev=", arm_smmu_bypass_dev_setup);
+#endif
+
 enum arm_smmu_msi_index {
 	EVTQ_MSI_INDEX,
 	GERROR_MSI_INDEX,
@@ -2856,16 +2892,40 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
 #define IS_HISI_PTT_DEVICE(pdev)	((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
 					 (pdev)->device == 0xa12e)
 
+#ifdef CONFIG_SMMU_BYPASS_DEV
+static int arm_smmu_bypass_dev_domain_type(struct device *dev)
+{
+	int i;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	for (i = 0; i < smmu_bypass_devices_num; i++) {
+		if ((smmu_bypass_devices[i].vendor == pdev->vendor) &&
+		    (smmu_bypass_devices[i].device == pdev->device)) {
+			dev_info(dev, "device 0x%hx:0x%hx uses identity mapping.",
+				 pdev->vendor, pdev->device);
+			return IOMMU_DOMAIN_IDENTITY;
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int arm_smmu_def_domain_type(struct device *dev)
 {
+	int ret = 0;
+
 	if (dev_is_pci(dev)) {
 		struct pci_dev *pdev = to_pci_dev(dev);
 
 		if (IS_HISI_PTT_DEVICE(pdev))
 			return IOMMU_DOMAIN_IDENTITY;
+#ifdef CONFIG_SMMU_BYPASS_DEV
+		ret = arm_smmu_bypass_dev_domain_type(dev);
+#endif
 	}
 
-	return 0;
+	return ret;
 }
 
 static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
@@ -3026,12 +3086,58 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
 	return 0;
 }
 
+#ifdef CONFIG_SMMU_BYPASS_DEV
+static void arm_smmu_install_bypass_ste_for_dev(struct arm_smmu_device *smmu,
+						u32 sid)
+{
+	u64 val;
+	__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+
+	if (!step)
+		return;
+
+	val = STRTAB_STE_0_V;
+	val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+	step[0] = cpu_to_le64(val);
+	step[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
+					 STRTAB_STE_1_SHCFG_INCOMING));
+	step[2] = 0;
+}
+
+static int arm_smmu_prepare_init_l2_strtab(struct device *dev, void *data)
+{
+	u32 sid;
+	int ret;
+	struct pci_dev *pdev;
+	struct arm_smmu_device *smmu = (struct arm_smmu_device *)data;
+
+	if (!arm_smmu_def_domain_type(dev))
+		return 0;
+
+	pdev = to_pci_dev(dev);
+	sid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	if (!arm_smmu_sid_in_range(smmu, sid))
+		return -ERANGE;
+
+	ret = arm_smmu_init_l2_strtab(smmu, sid);
+	if (ret)
+		return ret;
+
+	arm_smmu_install_bypass_ste_for_dev(smmu, sid);
+
+	return 0;
+}
+#endif
+
 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 {
 	void *strtab;
 	u64 reg;
 	u32 size, l1size;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
+#ifdef CONFIG_SMMU_BYPASS_DEV
+	int ret;
+#endif
 
 	/* Calculate the L1 size, capped to the SIDSIZE. */
 	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
@@ -3060,8 +3166,20 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
 	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
 	cfg->strtab_base_cfg = reg;
 
+#ifdef CONFIG_SMMU_BYPASS_DEV
+	ret = arm_smmu_init_l1_strtab(smmu);
+	if (ret)
+		return ret;
+
+	if (smmu_bypass_devices_num) {
+		ret = bus_for_each_dev(&pci_bus_type, NULL, (void *)smmu,
+				       arm_smmu_prepare_init_l2_strtab);
+	}
+	return ret;
+#else
 	return arm_smmu_init_l1_strtab(smmu);
+#endif
 }
 
 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
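
For reference, a hypothetical boot command line exercising the new option; the vendor/device IDs below are placeholders, not values taken from the patch. Each smmu.bypassdev= entry is parsed by arm_smmu_bypass_dev_setup() as a hexadecimal PCI vendor:device pair via sscanf("%hx:%hx", ...), and the option can be repeated up to MAX_CMDLINE_SMMU_BYPASS_DEV (16) times:

    smmu.bypassdev=0x1000:0x005f smmu.bypassdev=0x1000:0x0014

Matching PCI devices are then reported as IOMMU_DOMAIN_IDENTITY by arm_smmu_def_domain_type(), and a bypass STE (STRTAB_STE_0_CFG_BYPASS, with SHCFG set to use the incoming attributes) is installed for their stream IDs when the two-level stream table is initialised.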