[PATCH v3 15/21] iommu/vt-d: Add helper to allocate paging domain
Lu Baolu
baolu.lu at linux.intel.com
Mon Jun 10 08:55:49 UTC 2024
The domain_alloc_user operation currently allocates a paging domain
with iommu_domain_alloc(), because it must fully initialize the domain
before returning it. Add a dedicated helper that performs this
initialization directly, so the path no longer needs
iommu_domain_alloc().
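With the helper in place, the call pattern becomes the following. This
is a minimal sketch mirroring the second hunk below; alloc_second_stage()
is a hypothetical wrapper used only for illustration. Note that failure
is now reported through ERR_PTR() instead of NULL, so callers switch to
IS_ERR()/ERR_CAST():

/* Hypothetical wrapper, for illustration only */
static struct iommu_domain *alloc_second_stage(struct device *dev)
{
	struct dmar_domain *dmar_domain;

	/* false: do not use first-stage translation */
	dmar_domain = paging_domain_alloc(dev, false);
	if (IS_ERR(dmar_domain))
		return ERR_CAST(dmar_domain);

	return &dmar_domain->domain;
}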
Signed-off-by: Lu Baolu <baolu.lu at linux.intel.com>
---
drivers/iommu/intel/iommu.c | 87 +++++++++++++++++++++++++++++++++----
1 file changed, 78 insertions(+), 9 deletions(-)
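Aside for reviewers: the level count returned by
iommu_superpage_capability() below feeds domain_super_pgsize_bitmap().
The following standalone userspace sketch (illustrative only, not
driver code) shows that mapping; level 1 corresponds to 2MiB pages,
level 2 adds 1GiB, with each level covering 9 more address bits:

#include <stdio.h>

static unsigned long long super_pgsize_bitmap(int superpage_levels)
{
	unsigned long long bitmap = 0;
	int shift = 21;			/* 2MiB is the first superpage size */

	while (superpage_levels-- > 0) {
		bitmap |= 1ULL << shift;
		shift += 9;		/* each level adds 9 address bits */
	}

	return bitmap;
}

int main(void)
{
	/* First stage with cap_fl1gp_support() set yields 2 levels */
	printf("0x%llx\n", (1ULL << 12) | super_pgsize_bitmap(2));
	/* prints 0x40201000: 4KiB | 2MiB | 1GiB */
	return 0;
}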
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2e9811bf2a4e..ccde5f5972e4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3633,6 +3633,79 @@ static struct iommu_domain blocking_domain = {
 	}
 };
 
+static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
+{
+	if (!intel_iommu_superpage)
+		return 0;
+
+	if (first_stage)
+		return cap_fl1gp_support(iommu->cap) ? 2 : 1;
+
+	return fls(cap_super_page_val(iommu->cap));
+}
+
+static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct dmar_domain *domain;
+	int addr_width;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&domain->devices);
+	INIT_LIST_HEAD(&domain->dev_pasids);
+	INIT_LIST_HEAD(&domain->cache_tags);
+	spin_lock_init(&domain->lock);
+	spin_lock_init(&domain->cache_lock);
+	xa_init(&domain->iommu_array);
+
+	domain->nid = dev_to_node(dev);
+	domain->has_iotlb_device = info->ats_enabled;
+	domain->use_first_level = first_stage;
+
+	/* calculate the address width */
+	addr_width = agaw_to_width(iommu->agaw);
+	if (addr_width > cap_mgaw(iommu->cap))
+		addr_width = cap_mgaw(iommu->cap);
+	domain->gaw = addr_width;
+	domain->agaw = iommu->agaw;
+	domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
+
+	/* iommu memory access coherency */
+	domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
+
+	/* pagesize bitmap */
+	domain->domain.pgsize_bitmap = SZ_4K;
+	domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
+	domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
+
+	/*
+	 * IOVA aperture: First-level translation restricts the input-address
+	 * to a canonical address (i.e., address bits 63:N have the same value
+	 * as address bit [N-1], where N is 48-bits with 4-level paging and
+	 * 57-bits with 5-level paging). Hence, skip bit [N-1].
+	 */
+	domain->domain.geometry.force_aperture = true;
+	domain->domain.geometry.aperture_start = 0;
+	if (first_stage)
+		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
+	else
+		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+	/* always allocate the top pgd */
+	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+	if (!domain->pgd) {
+		kfree(domain);
+		return ERR_PTR(-ENOMEM);
+	}
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+
+	return domain;
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
@@ -3695,15 +3768,11 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
 	if (user_data || (dirty_tracking && !ssads_supported(iommu)))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	/*
-	 * domain_alloc_user op needs to fully initialize a domain before
-	 * return, so uses iommu_domain_alloc() here for simple.
-	 */
-	domain = iommu_domain_alloc(dev->bus);
-	if (!domain)
-		return ERR_PTR(-ENOMEM);
-
-	dmar_domain = to_dmar_domain(domain);
+	/* Do not use first stage for user domain translation. */
+	dmar_domain = paging_domain_alloc(dev, false);
+	if (IS_ERR(dmar_domain))
+		return ERR_CAST(dmar_domain);
+	domain = &dmar_domain->domain;
 
 	if (nested_parent) {
 		dmar_domain->nested_parent = true;
--
2.34.1
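Addendum: the IOVA aperture math in paging_domain_alloc() can be
checked in isolation. The sketch below is standalone userspace code,
not driver code, and assumes __DOMAIN_MAX_ADDR(gaw) expands to
(((u64)1 << (gaw)) - 1) as defined in the driver; with 4-level paging
(gaw = 48) the canonical-address restriction halves the first-stage
aperture:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Assumed to match the driver's __DOMAIN_MAX_ADDR() definition */
#define DOMAIN_MAX_ADDR(gaw)	((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	int gaw = 48;	/* 4-level paging */

	printf("second-stage aperture end: 0x%" PRIx64 "\n",
	       DOMAIN_MAX_ADDR(gaw));		/* 0xffffffffffff */
	printf("first-stage aperture end:  0x%" PRIx64 "\n",
	       DOMAIN_MAX_ADDR(gaw - 1));	/* 0x7fffffffffff */
	return 0;
}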