[Freedreno] [PATCH 7/7] iommu/arm-smmu: add support for dynamic domains

Jordan Crouse jcrouse at codeaurora.org
Tue Mar 7 16:39:55 UTC 2017


Implement support for dynamic domain switching. This feature is
only enabled when the qcom,dynamic device tree attribute is set for
an smmu instance.

In order to use dynamic domains, a non-dynamic domain must first
be created and attached.  The non-dynamic domain must remain
attached while the device is in use.
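
For reference, the expected flow from an upstream client looks roughly
like the sketch below. This is a usage sketch only, not part of this
patch: iommu_domain_create_dynamic() is a placeholder name standing in
for the dynamic-domain allocation helper added by the core IOMMU
patches earlier in this series; only the ordering (create and attach
the non-dynamic parent first, then derive dynamic domains from it)
matters here.

    /*
     * Usage sketch only (not part of this patch).
     * iommu_domain_create_dynamic() is a placeholder name for the
     * core-IOMMU helper from earlier in this series.
     */
    #include <linux/iommu.h>
    #include <linux/platform_device.h>

    static int example_setup(struct device *dev)
    {
            struct iommu_domain *parent, *dynamic;
            int ret;

            /* The non-dynamic (parent) domain is created and attached first */
            parent = iommu_domain_alloc(&platform_bus_type);
            if (!parent)
                    return -ENOMEM;

            ret = iommu_attach_device(parent, dev);
            if (ret) {
                    iommu_domain_free(parent);
                    return ret;
            }

            /*
             * The parent must stay attached for as long as any dynamic
             * domain derived from it is in use.
             */
            dynamic = iommu_domain_create_dynamic(parent); /* placeholder API */
            if (!dynamic)
                    return -ENOMEM;

            /* ... map buffers into 'dynamic' and switch to it via TTBR0 ... */
            return 0;
    }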

The dynamic domain is cloned from the non-dynamic domain. Important
configuration information is copied from the non-dynamic domain and
the dynamic domain is automatically marked as "attached" (attaching a
dynamic domain does not program the hardware).

To switch domains dynamically, the hardware must be programmed so that the
TTBR0 register contains the value from the DOMAIN_ATTR_TTBR0 attribute of
the dynamic domain. The upstream driver may also need to do other hardware
specific register programming to properly synchronize the domain switch,
and it must ensure that all register state except for the TTBR0 register
is restored at the end of the switch operation.
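
A sketch of the switch itself from the client's side is below (again,
not part of this patch). DOMAIN_ATTR_TTBR0 is added by earlier patches
in this series; my_hw_write_ttbr0() and my_hw_restore_state() are
hypothetical device-specific helpers, and for a GPU this step would
typically happen from the command stream rather than from the CPU.

    /*
     * Sketch of a dynamic switch.  DOMAIN_ATTR_TTBR0 comes from earlier
     * patches in this series; my_hw_write_ttbr0()/my_hw_restore_state()
     * are hypothetical device-specific helpers.
     */
    static int example_switch(struct iommu_domain *dynamic)
    {
            u64 ttbr0;
            int ret;

            /* Assumes the attribute hands back the TTBR0 value as a u64 */
            ret = iommu_domain_get_attr(dynamic, DOMAIN_ATTR_TTBR0, &ttbr0);
            if (ret)
                    return ret;

            my_hw_write_ttbr0(ttbr0);       /* program the context bank TTBR0 */
            my_hw_restore_state();          /* restore everything except TTBR0 */

            return 0;
    }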

Signed-off-by: Jeremy Gebben <jgebben at codeaurora.org>
Signed-off-by: Jordan Crouse <jcrouse at codeaurora.org>
---
 drivers/iommu/arm-smmu.c | 157 ++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 136 insertions(+), 21 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e051750..34943f0 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -349,6 +349,7 @@ struct arm_smmu_device {
 	u32				features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_DYNAMIC		(1 << 1)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -377,6 +378,8 @@ struct arm_smmu_device {
 	struct clk                      **clocks;
 
 	u32				cavium_id_base; /* Specific to Cavium */
+
+	struct ida			asid_ida;
 };
 
 enum arm_smmu_context_fmt {
@@ -391,11 +394,17 @@ struct arm_smmu_cfg {
 	u8				irptndx;
 	u32				cbar;
 	enum arm_smmu_context_fmt	fmt;
+	u16                             asid;
+	u8                              vmid;
 };
 #define INVALID_IRPTNDX			0xff
+#define INVALID_ASID                   0xffff
+
+/* 0xff is a reasonable limit that works for all targets */
+#define MAX_ASID			0xff
 
-#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
+#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->asid)
+#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->vmid)
 
 enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
@@ -426,6 +435,7 @@ struct arm_smmu_option_prop {
 
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
 	{ 0, NULL},
 };
 
@@ -473,6 +483,11 @@ static void parse_driver_options(struct arm_smmu_device *smmu)
 	} while (arm_smmu_options[++i].opt);
 }
 
+static bool is_dynamic_domain(struct iommu_domain *domain)
+{
+	return !!(domain->type & (__IOMMU_DOMAIN_DYNAMIC));
+}
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -602,6 +617,10 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 static void arm_smmu_tlb_sync(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+
+	if (!smmu_domain->smmu)
+		return;
+
 	__arm_smmu_tlb_sync(smmu_domain->smmu);
 }
 
@@ -832,6 +851,44 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
+static int arm_smmu_init_asid(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	int ret;
+
+	/* For regular domains the asid is the context bank id */
+	if (likely(!is_dynamic_domain(domain))) {
+		cfg->asid = cfg->cbndx;
+		return 0;
+	}
+
+	/*
+	 * For dynamic domains, allocate a unique asid from our pool of virtual
+	 * values
+	 */
+	ret = ida_simple_get(&smmu->asid_ida, smmu->num_context_banks + 2,
+		MAX_ASID + 1, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n", ret);
+		return ret;
+	}
+
+	cfg->asid = ret;
+	return 0;
+}
+
+static void arm_smmu_free_asid(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+
+	if (cfg->asid != INVALID_ASID && is_dynamic_domain(domain))
+		ida_simple_remove(&smmu->asid_ida, cfg->asid);
+}
+
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 					struct arm_smmu_device *smmu)
 {
@@ -841,6 +898,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	bool dynamic = is_dynamic_domain(domain);
 	unsigned int quirks =
 		smmu_domain->attributes & (1 << DOMAIN_ATTR_ENABLE_TTBR1) ?
 			IO_PGTABLE_QUIRK_ARM_TTBR1 : 0;
@@ -849,6 +907,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
+	smmu_domain->cfg.asid = INVALID_ASID;
+
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -939,12 +999,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		goto out_unlock;
 	}
 
-	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
-				      smmu->num_context_banks);
-	if (ret < 0)
-		goto out_unlock;
-
-	cfg->cbndx = ret;
+	/* Dynamic domains will inherit cbndx from the parent */
+	if (!dynamic) {
+		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+					      smmu->num_context_banks);
+		if (ret < 0)
+			goto out_unlock;
+		cfg->cbndx = ret;
+	}
 	if (smmu->version < ARM_SMMU_V2) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
@@ -961,6 +1023,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.iommu_dev	= smmu->dev,
 	};
 
+	cfg->vmid = cfg->cbndx + 1;
+
 	smmu_domain->smmu = smmu;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
 					 smmu_domain);
@@ -974,19 +1038,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	domain->geometry.aperture_end = (1UL << ias) - 1;
 	domain->geometry.force_aperture = true;
 
-	/* Initialise the context bank with our page table cfg */
-	arm_smmu_init_context_bank(smmu_domain, &smmu_domain->pgtbl_cfg);
+	/* Assign an asid */
+	ret = arm_smmu_init_asid(domain, smmu);
+	if (ret)
+		goto out_clear_smmu;
 
-	/*
-	 * Request context fault interrupt. Do this last to avoid the
-	 * handler seeing a half-initialised domain state.
-	 */
-	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
-	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
-			       IRQF_SHARED, "arm-smmu-context-fault", domain);
-	if (ret < 0) {
-		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
-			cfg->irptndx, irq);
+	if (!dynamic) {
+		/* Initialise the context bank with our page table cfg */
+		arm_smmu_init_context_bank(smmu_domain,
+						&smmu_domain->pgtbl_cfg);
+
+		/*
+		 * Request context fault interrupt. Do this last to avoid the
+		 * handler seeing a half-initialised domain state.
+		 */
+		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+		ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
+				IRQF_SHARED, "arm-smmu-context-fault", domain);
+		if (ret < 0) {
+			dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+				cfg->irptndx, irq);
+			cfg->irptndx = INVALID_IRPTNDX;
+			goto out_clear_smmu;
+		}
+	} else {
 		cfg->irptndx = INVALID_IRPTNDX;
 	}
 
@@ -1014,6 +1089,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	if (!smmu)
 		return;
 
+	if (is_dynamic_domain(domain)) {
+		arm_smmu_free_asid(domain);
+		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+		return;
+	}
+
 	/*
 	 * Disable the context bank and free the page tables before freeing
 	 * it.
@@ -1021,6 +1102,8 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
 
+	arm_smmu_tlb_inv_context(smmu_domain);
+
 	if (cfg->irptndx != INVALID_IRPTNDX) {
 		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
 		devm_free_irq(smmu->dev, irq, domain);
@@ -1034,7 +1117,8 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
 	struct arm_smmu_domain *smmu_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA &&
+		type != IOMMU_DOMAIN_DYNAMIC)
 		return NULL;
 	/*
 	 * Allocate the domain and initialise some of its data structures.
@@ -1257,6 +1341,10 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
+	/* Dynamic domains do not need to be attached */
+	if (is_dynamic_domain(domain))
+		return 0;
+
 	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
@@ -1531,6 +1619,29 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
 	return group;
 }
 
+static int arm_smmu_domain_init_dynamic(struct iommu_domain *parent,
+		struct iommu_domain *child)
+{
+	struct arm_smmu_domain *parent_domain = to_smmu_domain(parent);
+	struct arm_smmu_domain *child_domain = to_smmu_domain(child);
+	struct arm_smmu_device *smmu = parent_domain->smmu;
+
+	/* We can't do any of this until the parent is attached */
+	if (!smmu)
+		return -ENODEV;
+
+	if (!(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
+		dev_err(smmu->dev, "dynamic domains are not supported\n");
+		return -EPERM;
+	}
+
+	/* Copy the context bank from the parent */
+	child_domain->cfg.cbndx = parent_domain->cfg.cbndx;
+
+	/* Initialize the context and create all the useful stuff */
+	return arm_smmu_init_domain_context(child, smmu);
+}
+
 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
 				    enum iommu_attr attr, void *data)
 {
@@ -1626,6 +1737,7 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
 	.of_xlate		= arm_smmu_of_xlate,
+	.domain_init_dynamic	= arm_smmu_domain_init_dynamic,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
 };
 
@@ -2037,6 +2149,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	smmu->dev = dev;
+	ida_init(&smmu->asid_ida);
 
 	data = of_device_get_match_data(dev);
 	smmu->version = data->version;
@@ -2148,6 +2261,8 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
 		dev_err(&pdev->dev, "removing device with active domains!\n");
 
+	ida_destroy(&smmu->asid_ida);
+
 	/* Turn the thing off */
 	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
-- 
1.9.1


