diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c
index 8a63449f27b5203fc0db69631db95c49ae1c03cd..de866e711be2ee3b1ecf0945508f0aa0f5b7a81b 100644
--- a/drivers/acpi/arm64/mpam.c
+++ b/drivers/acpi/arm64/mpam.c
@@ -95,25 +95,110 @@ static void acpi_mpam_parse_irqs(struct platform_device *pdev,
 	}
 }
 
-static int acpi_mpam_parse_resource(struct mpam_msc *msc,
+#define UUID_MPAM_INTERCONNECT_TABLE		"fe2bd645-033b-49e6-9479-2e0b8b21d1cd"
+
+struct acpi_mpam_interconnect_descriptor_table {
+	u8	type_uuid[16];
+	u32	num_descriptors;
+};
+
+struct acpi_mpam_interconnect_descriptor {
+	u32	source_id;
+	u32	destination_id;
+	u8	link_type;
+	u8	reserved[3];
+};
+
+static int acpi_mpam_parse_resource(struct acpi_mpam_msc_node *tbl_msc,
+				    struct mpam_msc *msc,
 				    struct acpi_mpam_resource_node *res)
 {
+	struct acpi_mpam_interconnect_descriptor_table *tbl_int_tbl;
+	struct acpi_mpam_interconnect_descriptor *tbl_int;
+	guid_t int_tbl_uuid, spec_uuid;
 	u32 cache_id;
+	off_t offset;
 	int level;
 
+	/*
+	 * Class IDs are somewhat arbitrary, but need to be coordinated:
+	 * 0-N: Caches, by level.
+	 * 64, 65: Interconnects. Ideally these would appear between the
+	 *     classes their controls sit adjacent to.
+	 * 128: SMMU.
+	 * 192-192+level: Memory-side caches. Nothing checks that the level
+	 *                is a small number.
+	 * 255: Memory controllers.
+	 *
+	 * ACPI devices would need a class id allocated based on the _HID.
+	 *
+	 * Classes that the mpam driver can't currently plumb into resctrl
+	 * are registered as UNKNOWN.
+	 */
 	switch (res->locator_type) {
 	case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE:
 		cache_id = res->locator.cache_locator.cache_reference;
 		level = find_acpi_cache_level_from_id(cache_id);
-		if (level < 0) {
+		if (level < 0 || level >= 64) {
 			pr_err_once("Bad level for cache with id %u\n", cache_id);
-			return level;
+			return -EINVAL;
 		}
 		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
 				       level, cache_id);
 	case ACPI_MPAM_LOCATION_TYPE_MEMORY:
 		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY,
 				       255, res->locator.memory_locator.proximity_domain);
+	case ACPI_MPAM_LOCATION_TYPE_SMMU:
+		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_UNKNOWN,
+				       128, res->locator.smmu_locator.smmu_interface);
+	case ACPI_MPAM_LOCATION_TYPE_MEMORY_CACHE:
+		cache_id = res->locator.mem_cache_locator.reference;
+		level = res->locator.mem_cache_locator.level;
+		if (192 + level >= 255) {
+			pr_err_once("Bad level for memory side cache with reference %u\n",
+				    cache_id);
+			return -EINVAL;
+		}
+
+		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
+				       192 + level, cache_id);
+
+	case ACPI_MPAM_LOCATION_TYPE_INTERCONNECT:
+		/* Find the descriptor table, and check it lands in the parent msc */
+		offset = res->locator.interconnect_ifc_locator.inter_connect_desc_tbl_off;
+		if (offset + sizeof(*tbl_int_tbl) > tbl_msc->length) {
+			pr_err_once("Bad offset for interconnect descriptor on msc %u\n",
+				    tbl_msc->identifier);
+			return -EINVAL;
+		}
+		tbl_int_tbl = ACPI_ADD_PTR(struct acpi_mpam_interconnect_descriptor_table,
+					   tbl_msc, offset);
+		guid_parse(UUID_MPAM_INTERCONNECT_TABLE, &spec_uuid);
+		import_guid(&int_tbl_uuid, tbl_int_tbl->type_uuid);
+		if (!guid_equal(&spec_uuid, &int_tbl_uuid)) {
+			pr_err_once("Bad UUID for interconnect descriptor on msc %u\n",
+				    tbl_msc->identifier);
+			return -EINVAL;
+		}
+
+		offset += sizeof(*tbl_int_tbl);
+		offset += tbl_int_tbl->num_descriptors * sizeof(*tbl_int);
+		if (tbl_int_tbl->num_descriptors == 0 ||
+		    offset > tbl_msc->length) {
+			pr_err_once("Bad num_descriptors for interconnect descriptor on msc %u\n",
+				    tbl_msc->identifier);
+			return -EINVAL;
+		}
+
+		tbl_int = ACPI_ADD_PTR(struct acpi_mpam_interconnect_descriptor,
+				       tbl_int_tbl, sizeof(*tbl_int_tbl));
+		cache_id = tbl_int->source_id;
+
+		/* Skip unknown link types */
+		if (tbl_int->link_type != 0 && tbl_int->link_type != 1)
+			return 0;
+
+		return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_UNKNOWN,
+				       64 + tbl_int->link_type, cache_id);
 	default:
 		/* These get discovered later and treated as unknown */
 		return 0;
@@ -128,7 +213,7 @@ int acpi_mpam_parse_resources(struct mpam_msc *msc,
 
 	resources = (struct acpi_mpam_resource_node *)(tbl_msc + 1);
 	for (i = 0; i < tbl_msc->num_resouce_nodes; i++) {
-		err = acpi_mpam_parse_resource(msc, &resources[i]);
+		err = acpi_mpam_parse_resource(tbl_msc, msc, &resources[i]);
 		if (err)
 			return err;
 	}
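
A worked example of the layout the interconnect parsing above expects
(hypothetical values; the 20-byte descriptor table header is followed
directly by 12-byte descriptors, all offsets relative to the MSC node):

	/*
	 * An MSC node of length 0x90 with its interconnect descriptor
	 * table at offset 0x60:
	 *
	 *   0x60: type_uuid       fe2bd645-033b-49e6-9479-2e0b8b21d1cd
	 *   0x70: num_descriptors 2
	 *   0x74: descriptor[0]   { source_id, destination_id, link_type }
	 *   0x80: descriptor[1]   { ... }
	 *
	 * The second bounds check requires 0x60 + 20 + 2 * 12 = 0x8c to
	 * fit within the node's 0x90 bytes. Note that only descriptor[0]
	 * is consumed: the parser creates one RIS per resource node.
	 */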
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 6b11af76eb975d1f00fec27495a755ff8b9c6050..06995ee563d5688e227c2abd30bc15039dabb919 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -336,7 +336,7 @@ static int arm_vsmmu_cache_invalidate(struct iommufd_viommu *viommu,
 			continue;
 
 		/* FIXME always uses the main cmdq rather than trying to group by type */
-		ret = arm_smmu_cmdq_issue_cmdlist(smmu, last->cmd, cur - last, true);
+		ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd, cur - last, true);
 		if (ret) {
 			cur--;
 			goto out;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 7fe05fea676a95d6373728e6f9ae5127d015707b..0f923cd53858cc5cbc1d26a28817f6470dd92d27 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -11,6 +11,7 @@
 
 #include <linux/acpi.h>
 #include <linux/acpi_iort.h>
+#include <linux/arm_mpam.h>
 #include <linux/bitops.h>
 #include <linux/crash_dump.h>
 #include <linux/delay.h>
@@ -719,11 +720,11 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
 
 /* Wait for the command queue to become non-full */
 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+					     struct arm_smmu_cmdq *cmdq,
 					     struct arm_smmu_ll_queue *llq)
 {
 	unsigned long flags;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	int ret = 0;
 
 	/*
@@ -754,11 +755,11 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+					  struct arm_smmu_cmdq *cmdq,
 					  struct arm_smmu_ll_queue *llq)
 {
 	int ret = 0;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
 
 	queue_poll_init(smmu, &qp);
@@ -778,10 +779,10 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+					       struct arm_smmu_cmdq *cmdq,
 					       struct arm_smmu_ll_queue *llq)
 {
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 prod = llq->prod;
 	int ret = 0;
 
@@ -828,12 +829,13 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
 }
 
 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+					 struct arm_smmu_cmdq *cmdq,
 					 struct arm_smmu_ll_queue *llq)
 {
 	if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
-		return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+		return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
 
-	return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+	return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
 }
 
 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
@@ -878,7 +880,7 @@ static int arm_smmu_ecmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 
 		while (!queue_has_space(&llq, n + sync)) {
 			local_irq_restore(flags);
-			if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+			if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
 				dev_err_ratelimited(smmu->dev, "ECMDQ timeout\n");
 			local_irq_save(flags);
 		}
@@ -914,7 +916,7 @@ static int arm_smmu_ecmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
 	if (sync) {
 		llq.prod = queue_inc_prod_n(&llq, n);
-		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+		ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
 		if (ret) {
 			dev_err_ratelimited(smmu->dev,
 					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
@@ -953,13 +955,13 @@ static int arm_smmu_ecmdq_issue_cmdlist(struct arm_smmu_device *smmu,
  *   CPU will appear before any of the commands from the other CPU.
  */
 int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+				struct arm_smmu_cmdq *cmdq,
 				u64 *cmds, int n, bool sync)
 {
 	u64 cmd_sync[CMDQ_ENT_DWORDS];
 	u32 prod;
 	unsigned long flags;
 	bool owner;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	struct arm_smmu_ll_queue llq, head;
 	int ret = 0;
 
@@ -978,7 +980,7 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 
 		while (!queue_has_space(&llq, n + sync)) {
 			local_irq_restore(flags);
-			if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+			if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
 				dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
 			local_irq_save(flags);
 		}
@@ -1064,7 +1066,7 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
 	if (sync) {
 		llq.prod = queue_inc_prod_n(&llq, n);
-		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+		ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
 		if (ret) {
 			dev_err_ratelimited(smmu->dev,
 					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
@@ -1099,7 +1101,8 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 		return -EINVAL;
 	}
 
-	return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
+	return arm_smmu_cmdq_issue_cmdlist(smmu, arm_smmu_get_cmdq(smmu),
+					   cmd, 1, sync);
 }
 
 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1114,6 +1117,13 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
 	return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
 }
 
+static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
+				     struct arm_smmu_cmdq_batch *cmds)
+{
+	cmds->num = 0;
+	cmds->cmdq = arm_smmu_get_cmdq(smmu);
+}
+
 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 				    struct arm_smmu_cmdq_batch *cmds,
 				    struct arm_smmu_cmdq_ent *cmd)
@@ -1122,13 +1132,15 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 
 	if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
 	    (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
-		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
-		cmds->num = 0;
+		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+					    cmds->num, true);
+		arm_smmu_cmdq_batch_init(smmu, cmds);
 	}
 
 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
-		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
-		cmds->num = 0;
+		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+					    cmds->num, false);
+		arm_smmu_cmdq_batch_init(smmu, cmds);
 	}
 
 	index = cmds->num * CMDQ_ENT_DWORDS;
@@ -1144,7 +1156,8 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
 				      struct arm_smmu_cmdq_batch *cmds)
 {
-	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+	return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
+					   cmds->num, true);
 }
 
 static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
@@ -1396,7 +1409,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
 		},
 	};
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(smmu, &cmds);
 
 	arm_smmu_preempt_disable(smmu);
 	for (i = 0; i < master->num_streams; i++) {
@@ -2254,7 +2267,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 
 	arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(master->smmu, &cmds);
 	arm_smmu_preempt_disable(master->smmu);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.atc.sid = master->streams[i].id;
@@ -2295,7 +2308,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	if (!atomic_read(&smmu_domain->nr_ats_masters))
 		return 0;
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds);
 
 	arm_smmu_preempt_disable(smmu_domain->smmu);
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2390,7 +2403,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 			num_pages++;
 	}
 
-	cmds.num = 0;
+	arm_smmu_cmdq_batch_init(smmu, &cmds);
 
 	arm_smmu_preempt_disable(smmu);
 	while (iova < end) {
@@ -3985,6 +3998,104 @@ static int arm_smmu_def_domain_type(struct device *dev)
 	return ret;
 }
 
+static int arm_smmu_group_set_mpam(struct iommu_group *group, u16 partid,
+				   u8 pmg)
+{
+	int i;
+	u32 sid;
+	unsigned long flags;
+	struct arm_smmu_ste *step;
+	struct iommu_domain *domain;
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_master *master;
+	struct arm_smmu_cmdq_batch cmds;
+	struct arm_smmu_domain *smmu_domain;
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode	= CMDQ_OP_CFGI_STE,
+		.cfgi	= {
+			.leaf	= true,
+		},
+	};
+	struct arm_smmu_master_domain *master_domain;
+
+	domain = iommu_get_domain_for_group(group);
+	if (!domain)
+		return -EINVAL;
+
+	/* to_smmu_domain() is container_of(), so check before converting */
+	smmu_domain = to_smmu_domain(domain);
+	if (!smmu_domain->smmu)
+		return -EINVAL;
+
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_MPAM))
+		return -EIO;
+	smmu = smmu_domain->smmu;
+
+	arm_smmu_cmdq_batch_init(smmu, &cmds);
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master_domain, &smmu_domain->devices,
+			    devices_elm) {
+		master = master_domain->master;
+
+		for (i = 0; i < master->num_streams; i++) {
+			sid = master->streams[i].id;
+			step = arm_smmu_get_step_for_sid(smmu, sid);
+
+			/* These need locking if the VMSPtr is ever used */
+			step->data[4] = FIELD_PREP(STRTAB_STE_4_PARTID, partid);
+			step->data[5] = FIELD_PREP(STRTAB_STE_5_PMG, pmg);
+
+			cmd.cfgi.sid = sid;
+			arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+		}
+
+		master->partid = partid;
+		master->pmg = pmg;
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+	arm_smmu_cmdq_batch_submit(smmu, &cmds);
+
+	return 0;
+}
+
+static int arm_smmu_group_get_mpam(struct iommu_group *group, u16 *partid,
+				   u8 *pmg)
+{
+	int err = -EINVAL;
+	unsigned long flags;
+	struct iommu_domain *domain;
+	struct arm_smmu_master *master;
+	struct arm_smmu_domain *smmu_domain;
+	struct arm_smmu_master_domain *master_domain;
+
+	domain = iommu_get_domain_for_group(group);
+	if (!domain)
+		return -EINVAL;
+
+	/* to_smmu_domain() is container_of(), so check before converting */
+	smmu_domain = to_smmu_domain(domain);
+	if (!smmu_domain->smmu)
+		return -EINVAL;
+
+	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_MPAM))
+		return -EIO;
+
+	if (!partid && !pmg)
+		return 0;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master_domain, &smmu_domain->devices,
+			    devices_elm) {
+		master = master_domain->master;
+		if (master) {
+			if (partid)
+				*partid = master->partid;
+			if (pmg)
+				*pmg = master->pmg;
+			err = 0;
+		}
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+
+	return err;
+}
+
 static struct iommu_ops arm_smmu_ops = {
 	.identity_domain	= &arm_smmu_identity_domain,
 	.blocked_domain		= &arm_smmu_blocked_domain,
@@ -4001,6 +4112,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.remove_dev_pasid	= arm_smmu_remove_dev_pasid,
 	.dev_enable_feat	= arm_smmu_dev_enable_feature,
 	.dev_disable_feat	= arm_smmu_dev_disable_feature,
+	.get_group_qos_params	= arm_smmu_group_get_mpam,
+	.set_group_qos_params	= arm_smmu_group_set_mpam,
 	.page_response		= arm_smmu_page_response,
 	.def_domain_type	= arm_smmu_def_domain_type,
 	.viommu_alloc		= arm_vsmmu_alloc,
@@ -4978,6 +5091,29 @@ static void arm_smmu_get_httu(struct arm_smmu_device *smmu, u32 reg)
 			  hw_features, fw_features);
 }
 
+static void arm_smmu_mpam_register_smmu(struct arm_smmu_device *smmu)
+{
+	u16 partid_max;
+	u8 pmg_max;
+	u32 reg;
+
+	if (!IS_ENABLED(CONFIG_ARM64_MPAM))
+		return;
+
+	if (!(smmu->features & ARM_SMMU_FEAT_MPAM))
+		return;
+
+	reg = readl_relaxed(smmu->base + ARM_SMMU_MPAMIDR);
+	if (!reg)
+		return;
+
+	partid_max = FIELD_GET(SMMU_MPAMIDR_PARTID_MAX, reg);
+	pmg_max = FIELD_GET(SMMU_MPAMIDR_PMG_MAX, reg);
+
+	if (mpam_register_requestor(partid_max, pmg_max))
+		smmu->features &= ~ARM_SMMU_FEAT_MPAM;
+}
+
 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 {
 	u32 reg;
@@ -5147,6 +5283,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 
 	if (FIELD_GET(IDR3_RIL, reg))
 		smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+	if (FIELD_GET(IDR3_MPAM, reg))
+		smmu->features |= ARM_SMMU_FEAT_MPAM;
 
 	/* IDR5 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
@@ -5218,6 +5356,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	if (arm_smmu_sva_supported(smmu))
 		smmu->features |= ARM_SMMU_FEAT_SVA;
 
+	arm_smmu_mpam_register_smmu(smmu);
+
 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
 		 smmu->ias, smmu->oas, smmu->features);
 
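
With the command queue now passed explicitly, batched submission follows
the pattern below (a sketch mirroring arm_smmu_sync_cd() above; the batch
snapshots which cmdq it was initialised against and submits there):

	struct arm_smmu_cmdq_batch cmds;
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= { .leaf = true },
	};

	arm_smmu_cmdq_batch_init(smmu, &cmds);		/* records arm_smmu_get_cmdq(smmu) */
	cmd.cfgi.sid = sid;				/* sid assumed in scope */
	arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
	arm_smmu_cmdq_batch_submit(smmu, &cmds);	/* issues on cmds.cmdq with a sync */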
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 38d46098c668b6dcb0dde44b1b6152ddbee9e748..a26011401b118ec4f50523b33811bdffb9522226 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -67,6 +67,7 @@
 #define IDR3_BBML2			2
+#define IDR3_MPAM			(1 << 7)
 #define IDR3_FWB			(1 << 8)
 #define IDR3_RIL			(1 << 10)
 
 #define ARM_SMMU_IDR5			0x14
 #define IDR5_STALL_MAX			GENMASK(31, 16)
@@ -198,6 +199,10 @@
 #define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
 #define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc
 
+#define ARM_SMMU_MPAMIDR		0x130
+#define SMMU_MPAMIDR_PARTID_MAX		GENMASK(15, 0)
+#define SMMU_MPAMIDR_PMG_MAX		GENMASK(23, 16)
+
 #define ARM_SMMU_REG_SZ			0xe00
 
 /* Common MSI config fields */
@@ -298,6 +303,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
 
 #define STRTAB_STE_1_S2FWB		(1UL << 25)
+#define STRTAB_STE_1_S1MPAM		(1UL << 26)
 #define STRTAB_STE_1_S1STALLD		(1UL << 27)
 
 #define STRTAB_STE_1_EATS		GENMASK_ULL(29, 28)
 #define STRTAB_STE_1_EATS_ABT		0UL
@@ -327,6 +333,10 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
 
 #define STRTAB_STE_3_S2TTB_MASK		GENMASK_ULL(51, 4)
 
+#define STRTAB_STE_4_PARTID		GENMASK_ULL(31, 16)
+
+#define STRTAB_STE_5_PMG		GENMASK_ULL(7, 0)
+
 /* These bits can be controlled by userspace for STRTAB_STE_0_CFG_NESTED */
 #define STRTAB_STE_0_NESTING_ALLOWED                                         \
 	cpu_to_le64(STRTAB_STE_0_V | STRTAB_STE_0_CFG | STRTAB_STE_0_S1FMT | \
@@ -668,6 +678,7 @@ struct arm_smmu_ecmdq {
 
 struct arm_smmu_cmdq_batch {
 	u64				cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+	struct arm_smmu_cmdq		*cmdq;
 	int				num;
 };
 
@@ -771,6 +782,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_BBML2		(1 << 24)
 #define ARM_SMMU_FEAT_ECMDQ		(1 << 25)
 #define ARM_SMMU_FEAT_S2FWB		(1 << 26)
+#define ARM_SMMU_FEAT_MPAM		(1 << 27)
 	u32				features;
 
 #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
@@ -848,6 +860,8 @@ struct arm_smmu_master {
 	bool				sva_enabled;
 	bool				iopf_enabled;
 	unsigned int			ssid_bits;
+	u16				partid;
+	u8				pmg;
 };
 
 /* SMMU private data for an IOMMU domain */
@@ -994,6 +1008,7 @@ void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
 				  const struct arm_smmu_ste *target);
 
 int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
+				struct arm_smmu_cmdq *cmdq,
 				u64 *cmds, int n, bool sync);
 
 #ifdef CONFIG_ARM_SMMU_V3_SVA
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8e02783eabcba112bfc5f28171a2fa43e866598c..98397cbb8b9bf1b0f94384337f88499908385f94 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1016,6 +1016,45 @@ struct iommu_group *iommu_group_alloc(void)
 }
 EXPORT_SYMBOL_GPL(iommu_group_alloc);
 
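+/*
+ * iommu_group_get_from_kobj() - convert a reference on a group's kobject
+ * into an iommu_group reference. The reference on @group_kobj is dropped;
+ * the returned group must be released with iommu_group_put().
+ */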
+struct iommu_group *iommu_group_get_from_kobj(struct kobject *group_kobj)
+{
+	struct iommu_group *group;
+
+	if (!iommu_group_kset || !group_kobj)
+		return NULL;
+
+	group = container_of(group_kobj, struct iommu_group, kobj);
+
+	kobject_get(group->devices_kobj);
+	kobject_put(&group->kobj);
+
+	return group;
+}
+
+struct iommu_group *iommu_group_get_by_id(int id)
+{
+	struct kobject *group_kobj;
+	const char *name;
+
+	if (!iommu_group_kset)
+		return NULL;
+
+	name = kasprintf(GFP_KERNEL, "%d", id);
+	if (!name)
+		return NULL;
+
+	group_kobj = kset_find_obj(iommu_group_kset, name);
+	kfree(name);
+
+	return iommu_group_get_from_kobj(group_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
+
+struct kset *iommu_get_group_kset(void)
+{
+	return kset_get(iommu_group_kset);
+}
+
 /**
  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
  * @group: the group
@@ -2155,6 +2194,12 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
 
+struct iommu_domain *iommu_get_domain_for_group(struct iommu_group *group)
+{
+	return group->domain;
+}
+EXPORT_SYMBOL_GPL(iommu_get_domain_for_group);
+
 /*
  * For IOMMU_DOMAIN_DMA implementations which already provide their own
  * guarantees that the group and its default domain are valid and correct.
@@ -3784,3 +3829,79 @@ int iommu_replace_group_handle(struct iommu_group *group,
 	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, IOMMUFD_INTERNAL);
+
+/**
+ * iommu_group_set_qos_params() - Set the QoS parameters for a group
+ * @group: the iommu group.
+ * @partition: the partition label all traffic from the group should use.
+ * @perf_mon_grp: the performance label all traffic from the group should use.
+ *
+ * Return: 0 on success, or an error.
+ */
+int iommu_group_set_qos_params(struct iommu_group *group,
+			       u16 partition, u8 perf_mon_grp)
+{
+	const struct iommu_ops *ops;
+	struct group_device *device;
+	int ret;
+
+	mutex_lock(&group->mutex);
+	device = list_first_entry_or_null(&group->devices, typeof(*device),
+					  list);
+	if (!device) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	ops = dev_iommu_ops(device->dev);
+	if (!ops->set_group_qos_params) {
+		ret = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
+	ret = ops->set_group_qos_params(group, partition, perf_mon_grp);
+
+out_unlock:
+	mutex_unlock(&group->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommu_group_set_qos_params, IOMMUFD_INTERNAL);
+
+/**
+ * iommu_group_get_qos_params() - Get the QoS parameters for a group
+ * @group: the iommu group.
+ * @partition: the partition label all traffic from the group uses.
+ * @perf_mon_grp: the performance label all traffic from the group uses.
+ *
+ * Return: 0 on success, or an error.
+ */
+int iommu_group_get_qos_params(struct iommu_group *group,
+			       u16 *partition, u8 *perf_mon_grp)
+{
+	const struct iommu_ops *ops;
+	struct group_device *device;
+	int ret;
+
+	mutex_lock(&group->mutex);
+	device = list_first_entry_or_null(&group->devices, typeof(*device),
+					  list);
+	if (!device) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	ops = dev_iommu_ops(device->dev);
+	if (!ops->get_group_qos_params) {
+		ret = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
+	ret = ops->get_group_qos_params(group, partition, perf_mon_grp);
+
+out_unlock:
+	mutex_unlock(&group->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommu_group_get_qos_params, IOMMUFD_INTERNAL);
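
A minimal sketch of how a caller strings the new helpers together (error
handling trimmed; the group id is hypothetical):

	struct iommu_group *group;
	u16 partid;
	u8 pmg;

	group = iommu_group_get_by_id(7);
	if (group) {
		if (!iommu_group_get_qos_params(group, &partid, &pmg))
			iommu_group_set_qos_params(group, partid, pmg);
		iommu_group_put(group);		/* drops the devices_kobj reference */
	}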
diff --git a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig
index 75f5b2454fbe45b9531e6a680ecdc55df166200b..a6e937b8f91d60ae43debd26ef0bab8ba65ed18a 100644
--- a/drivers/platform/mpam/Kconfig
+++ b/drivers/platform/mpam/Kconfig
@@ -6,3 +6,4 @@ config ARM_CPU_RESCTRL
 	depends on ARM64 && ARCH_HAS_CPU_RESCTRL
 	depends on MISC_FILESYSTEMS
 	select RESCTRL_RMID_DEPENDS_ON_CLOSID
+	select RESCTRL_IOMMU
diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c
index dd1ccca8edcb69dba004d291ecec9e14f8ddcf7a..609dcb21c7ac02c45927e9033e28f16032b408ec 100644
--- a/drivers/platform/mpam/mpam_resctrl.c
+++ b/drivers/platform/mpam/mpam_resctrl.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/errno.h>
+#include <linux/iommu.h>
 #include <linux/limits.h>
 #include <linux/list.h>
 #include <linux/printk.h>
@@ -244,6 +245,49 @@ bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid)
 	return (tsk_closid == closid) && (tsk_rmid == rmid);
 }
 
+int resctrl_arch_set_iommu_closid_rmid(struct iommu_group *group, u32 closid,
+				       u32 rmid)
+{
+	u16 partid;
+
+	if (cdp_enabled)
+		partid = closid << 1;
+	else
+		partid = closid;
+
+	return iommu_group_set_qos_params(group, partid, rmid);
+}
+
+bool resctrl_arch_match_iommu_closid(struct iommu_group *group, u32 closid)
+{
+	u16 partid;
+	int err = iommu_group_get_qos_params(group, &partid, NULL);
+
+	if (err)
+		return false;
+
+	if (cdp_enabled)
+		partid >>= 1;
+
+	return (partid == closid);
+}
+
+bool resctrl_arch_match_iommu_closid_rmid(struct iommu_group *group,
+					  u32 closid, u32 rmid)
+{
+	u8 pmg;
+	u16 partid;
+	int err = iommu_group_get_qos_params(group, &partid, &pmg);
+
+	if (err)
+		return false;
+
+	if (cdp_enabled)
+		partid >>= 1;
+
+	return (partid == closid) && (rmid == pmg);
+}
+
 struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l)
 {
 	if (l >= RDT_NUM_RESOURCES)
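
As a concrete example of the CDP mapping above (values are arbitrary):
with cdp_enabled, each closid owns a pair of hardware PARTIDs, so the
helpers shift by one in each direction:

	u32 closid = 3;
	u16 partid = closid << 1;		/* 6 is what the SMMU is programmed with */

	WARN_ON((partid >> 1) != closid);	/* 6 and 7 both shift back to closid 3 */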
diff --git a/fs/resctrl/Kconfig b/fs/resctrl/Kconfig
index 5bbf5a1798cd943939f6211e2db896f3fff42131..468f02df8dd5d14fd3a97fec71efe5e236a6bfc2 100644
--- a/fs/resctrl/Kconfig
+++ b/fs/resctrl/Kconfig
@@ -21,3 +21,9 @@ config RESCTRL_RMID_DEPENDS_ON_CLOSID
 	  Enable by the architecture when the RMID values depend on the CLOSID.
 	  This causes the closid allocator to search for CLOSID with clean
 	  RMID.
+
+config RESCTRL_IOMMU
+	bool
+	help
+	  Enabled by the architecture when some IOMMUs can be configured
+	  with a CLOSID/RMID.
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
index 1ade368e26315e335ad172579886e1ed2860743a..13c64231c82d091682493864060d81b7b47bfb2b 100644
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -17,6 +17,7 @@
 #include <linux/debugfs.h>
 #include <linux/fs.h>
 #include <linux/fs_parser.h>
+#include <linux/iommu.h>
 #include <linux/sysfs.h>
 #include <linux/kernfs.h>
 #include <linux/seq_buf.h>
@@ -740,10 +741,110 @@ static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
 	return ret;
 }
 
+static int rdtgroup_move_iommu(int iommu_group_id, struct rdtgroup *rdtgrp)
+{
+	const struct cred *cred = current_cred();
+	struct iommu_group *iommu_group;
+	int err;
+
+	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID)) {
+		rdt_last_cmd_printf("No permission to move iommu_group %d\n",
+				    iommu_group_id);
+		return -EPERM;
+	}
+
+	iommu_group = iommu_group_get_by_id(iommu_group_id);
+	if (!iommu_group) {
+		rdt_last_cmd_printf("No matching iommu_group %d\n",
+				    iommu_group_id);
+		return -ESRCH;
+	}
+
+	if (rdtgrp->type == RDTMON_GROUP &&
+	    !resctrl_arch_match_iommu_closid(iommu_group,
+					     rdtgrp->mon.parent->closid)) {
+		rdt_last_cmd_puts("Can't move iommu_group to different control group\n");
+		err = -EINVAL;
+	} else {
+		err = resctrl_arch_set_iommu_closid_rmid(iommu_group,
+							 rdtgrp->closid,
+							 rdtgrp->mon.rmid);
+	}
+
+	iommu_group_put(iommu_group);
+
+	return err;
+}
+
+static bool iommu_matches_rdtgroup(struct iommu_group *group, struct rdtgroup *r)
+{
+	if (r->type == RDTCTRL_GROUP)
+		return resctrl_arch_match_iommu_closid(group, r->closid);
+
+	return resctrl_arch_match_iommu_closid_rmid(group, r->closid,
+						    r->mon.rmid);
+}
+
+static int rdt_move_group_iommus(struct rdtgroup *from, struct rdtgroup *to)
+{
+	int err, iommu_group_id;
+	struct kset *iommu_groups;
+	struct iommu_group *group;
+	struct kobject *group_kobj = NULL;
+
+	if (!IS_ENABLED(CONFIG_RESCTRL_IOMMU))
+		return 0;
+
+	if (from == to)
+		return 0;
+
+	iommu_groups = iommu_get_group_kset();
+
+	while ((group_kobj = kset_get_next_obj(iommu_groups, group_kobj))) {
+		/* iommu_group_get_from_kobj() wants to drop a reference */
+		kobject_get(group_kobj);
+
+		group = iommu_group_get_from_kobj(group_kobj);
+		if (!group)
+			continue;
+
+		if (!from || iommu_matches_rdtgroup(group, from)) {
+			err = kstrtoint(group_kobj->name, 0, &iommu_group_id);
+			if (!err)
+				err = rdtgroup_move_iommu(iommu_group_id, to);
+			if (err) {
+				iommu_group_put(group);
+				kobject_put(group_kobj);
+				break;
+			}
+		}
+
+		iommu_group_put(group);
+	}
+
+	kset_put(iommu_groups);
+	return err;
+}
+
+static bool string_is_iommu_group(char *buf, int *val)
+{
+	if (!IS_ENABLED(CONFIG_RESCTRL_IOMMU))
+		return false;
+
+	if (strlen(buf) <= strlen("iommu_group:"))
+		return false;
+
+	if (strncmp(buf, "iommu_group:", strlen("iommu_group:")))
+		return false;
+
+	buf += strlen("iommu_group:");
+
+	return !kstrtoint(buf, 0, val);
+}
+
 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
 				    char *buf, size_t nbytes, loff_t off)
 {
 	struct rdtgroup *rdtgrp;
+	int iommu_group_id;
+	bool is_iommu;
 	char *pid_str;
 	int ret = 0;
 	pid_t pid;
@@ -765,6 +866,15 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
 	while (buf && buf[0] != '\0' && buf[0] != '\n') {
 		pid_str = strim(strsep(&buf, ","));
 
+		is_iommu = string_is_iommu_group(pid_str, &iommu_group_id);
+		if (is_iommu) {
+			ret = rdtgroup_move_iommu(iommu_group_id, rdtgrp);
+			if (ret)
+				break;
+
+			continue;
+		}
+
 		if (kstrtoint(pid_str, 0, &pid)) {
 			rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
 			ret = -EINVAL;
@@ -790,6 +900,32 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
 	return ret ?: nbytes;
 }
 
+static void show_rdt_iommu(struct rdtgroup *r, struct seq_file *s)
+{
+	struct kset *iommu_groups;
+	struct iommu_group *group;
+	struct kobject *group_kobj = NULL;
+
+	if (!IS_ENABLED(CONFIG_RESCTRL_IOMMU))
+		return;
+
+	iommu_groups = iommu_get_group_kset();
+
+	while ((group_kobj = kset_get_next_obj(iommu_groups, group_kobj))) {
+		/* iommu_group_get_from_kobj() wants to drop a reference */
+		kobject_get(group_kobj);
+
+		group = iommu_group_get_from_kobj(group_kobj);
+		if (!group)
+			continue;
+
+		if (iommu_matches_rdtgroup(group, r))
+			seq_printf(s, "iommu_group:%s\n", group_kobj->name);
+
+		iommu_group_put(group);
+	}
+
+	kset_put(iommu_groups);
+}
+
 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
 {
 	struct task_struct *p, *t;
@@ -804,6 +940,8 @@ static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
 		}
 	}
 	rcu_read_unlock();
+
+	show_rdt_iommu(r, s);
 }
 
 static int rdtgroup_tasks_show(struct kernfs_open_file *of,
@@ -2754,6 +2892,9 @@ static void rmdir_all_sub(void)
 	/* Move all tasks to the default resource group */
 	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
 
+	/* Move all iommu_groups to the default resource group */
+	rdt_move_group_iommus(NULL, &rdtgroup_default);
+
 	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
 		/* Free any child rmids */
 		free_all_child_rdtgrp(rdtgrp);
@@ -3469,6 +3610,9 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
 	/* Give any tasks back to the parent group */
 	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
 
+	/* Give any iommu_groups back to the parent group */
+	rdt_move_group_iommus(rdtgrp, prdtgrp);
+
 	/* Update per cpu rmid of the moved CPUs first */
 	for_each_cpu(cpu, &rdtgrp->cpu_mask)
 		resctrl_arch_set_cpu_default_closid_rmid(cpu, rdtgrp->closid,
@@ -3512,6 +3656,9 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
 	/* Give any tasks back to the default group */
 	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
 
+	/* Give any iommu_groups back to the default group */
+	rdt_move_group_iommus(rdtgrp, &rdtgroup_default);
+
 	/* Give any CPUs back to the default group */
 	cpumask_or(&rdtgroup_default.cpu_mask,
 		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
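
With these hooks in place, an iommu_group is bound to a resource group
through the existing tasks file: writing "iommu_group:13" to
/sys/fs/resctrl/<grp>/tasks (the id 13 being whatever
/sys/kernel/iommu_groups shows) labels all traffic from that group with
the rdtgroup's CLOSID/RMID, and show_rdt_iommu() prints the same token
back when the tasks file is read.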
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
index d70e4e726fe65637ebc760297f4c19b076f48d59..504e7a4ba5f8677563b20985cbec634dc011594c 100644
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -86,6 +86,12 @@ struct rdt_resource;
 void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid);
 void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx);
 
+int resctrl_arch_set_iommu_closid_rmid(struct iommu_group *group, u32 closid,
+				       u32 rmid);
+bool resctrl_arch_match_iommu_closid(struct iommu_group *group, u32 closid);
+bool resctrl_arch_match_iommu_closid_rmid(struct iommu_group *group, u32 closid,
+					  u32 rmid);
+
 /* Pseudo lock is not supported by MPAM */
 static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
 static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 83ec4bf9809eca4761f2226d9b054762a8d1d29e..919514bb1231f8f9b2b8122739ab39aaea9902af 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -703,6 +703,12 @@ struct iommu_ops {
 	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
 	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
 
+	/* Per group IOMMU features */
+	int (*get_group_qos_params)(struct iommu_group *group, u16 *partition,
+				    u8 *perf_mon_grp);
+	int (*set_group_qos_params)(struct iommu_group *group, u16 partition,
+				    u8 perf_mon_grp);
+
 	void (*page_response)(struct device *dev, struct iopf_fault *evt,
 			      struct iommu_page_response *msg);
 
@@ -963,6 +969,8 @@ extern bool iommu_present(const struct bus_type *bus);
 extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
 extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
 extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
+struct iommu_group *iommu_group_get_from_kobj(struct kobject *group_kobj);
+extern struct iommu_group *iommu_group_get_by_id(int id);
 extern void iommu_domain_free(struct iommu_domain *domain);
 extern int iommu_attach_device(struct iommu_domain *domain,
 			       struct device *dev);
@@ -971,6 +979,7 @@ extern void iommu_detach_device(struct iommu_domain *domain,
 extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
 				   struct device *dev, ioasid_t pasid);
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_get_domain_for_group(struct iommu_group *group);
 extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
 extern size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
 			   phys_addr_t paddr, size_t size, size_t *count);
@@ -1019,6 +1028,7 @@ extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
 extern void iommu_group_put(struct iommu_group *group);
 
 extern int iommu_group_id(struct iommu_group *group);
+struct kset *iommu_get_group_kset(void);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
 
 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
@@ -1283,6 +1293,10 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
 			       struct device *dev, ioasid_t pasid);
 ioasid_t iommu_alloc_global_pasid(struct device *dev);
 void iommu_free_global_pasid(ioasid_t pasid);
+int iommu_group_set_qos_params(struct iommu_group *group,
+			       u16 partition, u8 perf_mon_grp);
+int iommu_group_get_qos_params(struct iommu_group *group,
+			       u16 *partition, u8 *perf_mon_grp);
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
@@ -1309,6 +1323,16 @@ static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus
 	return NULL;
 }
 
+static inline struct iommu_group *iommu_group_get_from_kobj(struct kobject *group_kobj)
+{
+	return NULL;
+}
+
+static inline struct iommu_group *iommu_group_get_by_id(int id)
+{
+	return NULL;
+}
+
 static inline void iommu_domain_free(struct iommu_domain *domain)
 {
 }
@@ -1467,6 +1491,11 @@ static inline int iommu_group_id(struct iommu_group *group)
 	return -ENODEV;
 }
 
+static inline struct kset *iommu_get_group_kset(void)
+{
+	return NULL;
+}
+
 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
 		unsigned long quirks)
 {
@@ -1658,6 +1687,17 @@ static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
 }
 
 static inline void iommu_free_global_pasid(ioasid_t pasid) {}
+static inline int iommu_group_set_qos_params(struct iommu_group *group,
+					     u16 partition, u8 perf_mon_grp)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_group_get_qos_params(struct iommu_group *group,
+					     u16 *partition, u8 *perf_mon_grp)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_IOMMU_API */
 
 #if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 505dee42aaae2fd7ed4c8b0e081e1d703eb69b74..70a04d484158faa625c1f2aca5afc7844dd043e3 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -210,6 +210,8 @@ static inline const struct kobj_type *get_ktype(const struct kobject *kobj)
 
 struct kobject *kset_find_obj(struct kset *, const char *);
 
+struct kobject *kset_get_next_obj(struct kset *kset, struct kobject *prev);
+
 /* The global /sys/kernel/ kobject for people to chain off of */
 extern struct kobject *kernel_kobj;
 /* The global /sys/kernel/mm/ kobject for people to chain off of */
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index edc7264a8369bdd525d86ded503aef2578c9f189..dfb3759f534a84498c591831694835e9d5abe650 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -422,4 +422,26 @@ void resctrl_exit(void);
 int resctrl_arch_mon_resource_init(void);
 void mbm_config_rftype_init(const char *config);
 
+/* When supported, the architecture must implement these */
+#ifndef CONFIG_RESCTRL_IOMMU
+static inline int
+resctrl_arch_set_iommu_closid_rmid(struct iommu_group *group,
+				   u32 closid, u32 rmid)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline bool
+resctrl_arch_match_iommu_closid(struct iommu_group *group, u32 closid)
+{
+	return false;
+}
+
+static inline bool
+resctrl_arch_match_iommu_closid_rmid(struct iommu_group *group,
+				     u32 closid, u32 rmid)
+{
+	return false;
+}
+#endif /* CONFIG_RESCTRL_IOMMU */
 #endif /* _RESCTRL_H */
diff --git a/lib/kobject.c b/lib/kobject.c
index 72fa20f405f1520a63dd50d9aa37f6609306eb3e..8dc19eba3835f79a79e022084ae3e474c5d1ee46 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -920,6 +920,27 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
 }
 EXPORT_SYMBOL_GPL(kset_find_obj);
 
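+/**
+ * kset_get_next_obj - iterate over the kobjects in a kset.
+ * @kset: the kset to walk.
+ * @prev: the kobject returned by the previous call, or NULL to start.
+ *
+ * Returns the next kobject on @kset's list with its refcount held, or
+ * NULL once the walk is complete. The reference on @prev is dropped,
+ * so a walk that runs to completion needs no cleanup from the caller.
+ */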
+struct kobject *kset_get_next_obj(struct kset *kset, struct kobject *prev)
+{
+	struct kobject *k;
+
+	spin_lock(&kset->list_lock);
+
+	if (!prev)
+		k = list_first_entry_or_null(&kset->list, typeof(*k), entry);
+	else
+		k = list_next_entry(prev, entry);
+
+	if (k && list_entry_is_head(k, &kset->list, entry))
+		k = NULL;
+
+	kobject_get(k);
+	spin_unlock(&kset->list_lock);
+	kobject_put(prev);
+
+	return k;
+}
+
 static void kset_release(struct kobject *kobj)
 {
 	struct kset *kset = container_of(kobj, struct kset, kobj);