[PATCH 1/5] Revert "iommu/vt-d: Fix possible recursive lock in iommu_flush_dev_iotlb()"
From: Lucas De Marchi <lucas.demarchi at intel.com>
Date: Fri Aug 26 16:56:01 UTC 2022
This reverts commit 27d3412f05c99bf4022fffdd26abccbd6b16d861.
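The reverted commit had switched a number of dmar_domain->lock critical sections in intel/iommu.c from plain spin_lock()/spin_unlock() to the IRQ-saving spin_lock_irqsave()/spin_unlock_irqrestore() variants; this revert switches them back, as the diff below shows. For reference, here is a minimal, self-contained sketch of the difference between the two forms. It is illustrative only and not part of this patch: struct demo_domain and the demo_* functions are made up for the example, while spinlock_t, spin_lock(), spin_lock_irqsave() and the list helpers are the standard kernel APIs.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Made-up structure for the example; it stands in for struct dmar_domain. */
struct demo_domain {
	spinlock_t lock;		/* protects @devices */
	struct list_head devices;
};

/*
 * Plain form (what this revert restores): correct as long as the lock is
 * never taken from interrupt context. If an interrupt handler could try to
 * take the same lock while it is held here, this would deadlock.
 */
static int demo_count_devices_plain(struct demo_domain *d)
{
	struct list_head *pos;
	int n = 0;

	spin_lock(&d->lock);
	list_for_each(pos, &d->devices)
		n++;
	spin_unlock(&d->lock);

	return n;
}

/*
 * IRQ-saving form (what the reverted commit had introduced): disables local
 * interrupts for the critical section and restores the previous interrupt
 * state afterwards, so the lock can also be taken safely from paths where
 * interrupts are involved.
 */
static int demo_count_devices_irqsave(struct demo_domain *d)
{
	struct list_head *pos;
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&d->lock, flags);
	list_for_each(pos, &d->devices)
		n++;
	spin_unlock_irqrestore(&d->lock, flags);

	return n;
}

As a rule of thumb, the plain form is preferred when the lock is never taken from interrupt context, since it leaves interrupts enabled and is cheaper; the _irqsave form is the defensive choice when that cannot be guaranteed, which is what the title of the reverted commit ("Fix possible recursive lock in iommu_flush_dev_iotlb()") points at.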
---
drivers/iommu/intel/iommu.c | 39 +++++++++++++++----------------------
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 0c32ad4deea7..a3f1b53909d2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -503,9 +503,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
- unsigned long flags;
- spin_lock_irqsave(&domain->lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
/*
* There could possibly be multiple device numa nodes as devices
@@ -517,7 +516,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
return nid;
}
@@ -1344,20 +1343,19 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info;
- unsigned long flags;
if (!iommu->qi)
return NULL;
- spin_lock_irqsave(&domain->lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
return info->ats_supported ? info : NULL;
}
}
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
return NULL;
}
@@ -1366,9 +1364,8 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
- unsigned long flags;
- spin_lock_irqsave(&domain->lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
@@ -1376,7 +1373,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
}
}
domain->has_iotlb_device = has_iotlb_device;
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1468,15 +1465,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
struct device_domain_info *info;
- unsigned long flags;
if (!domain->has_iotlb_device)
return;
- spin_lock_irqsave(&domain->lock, flags);
+ spin_lock(&domain->lock);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -2431,7 +2427,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
- unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2443,9 +2438,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (ret)
return ret;
info->domain = domain;
- spin_lock_irqsave(&domain->lock, flags);
+ spin_lock(&domain->lock);
list_add(&info->link, &domain->devices);
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -4083,7 +4078,6 @@ static void dmar_remove_one_dev_info(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct intel_iommu *iommu = info->iommu;
- unsigned long flags;
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4095,9 +4089,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
intel_pasid_free_table(info->dev);
}
- spin_lock_irqsave(&domain->lock, flags);
+ spin_lock(&domain->lock);
list_del(&info->link);
- spin_unlock_irqrestore(&domain->lock, flags);
+ spin_unlock(&domain->lock);
domain_detach_iommu(domain, iommu);
info->domain = NULL;
@@ -4416,20 +4410,19 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock_irqsave(&dmar_domain->lock, flags);
+ spin_lock(&dmar_domain->lock);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ spin_unlock(&dmar_domain->lock);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ spin_unlock(&dmar_domain->lock);
return true;
}
--
2.37.2