<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body>
<div class="moz-cite-prefix">On 2021-04-23 2:03 a.m., Felix Kuehling
wrote:<br>
</div>
<blockquote type="cite" cite="mid:40afb92c-cdb1-a20b-0976-a7e783462e62@amd.com">
<pre class="moz-quote-pre" wrap="">Am 2021-04-22 um 10:03 p.m. schrieb Philip Yang:
</pre>
<blockquote type="cite">
<pre class="moz-quote-pre" wrap="">Add interface to remove address from fault filter ring by resetting
fault ring entry of the fault address timestamp to 0, then future vm
fault on the address will be processed to recover.
Use spinlock to protect fault hash ring access by interrupt handler and
interrupt scheduled deferred work for vg20.
</pre>
</blockquote>
<pre class="moz-quote-pre" wrap="">
This needs a better explanation. When you say Vega20, I think you're
referring to the lack of HW IH rerouting. In that case
amdgpu_gmc_filter_faults runs in interrupt context before delegating the
IH entries to the SW IH ring.
</pre>
</blockquote>
Yes, Vega20 uses the ih_soft ring, so I need to add draining of retry
faults from the ih_soft ring for Vega20.<br>
<blockquote type="cite" cite="mid:40afb92c-cdb1-a20b-0976-a7e783462e62@amd.com">
<pre class="moz-quote-pre" wrap="">
On GPUs that support IH rerouting, amdgpu_gmc_filter_faults runs in the
same thread as the page fault handling, so there is no risk of
concurrently accessing the fault ring assuming that
amdgpu_gmc_filter_faults_remove is only called from the page fault handler.
Christian had an idea to do this without a lock, by using cmpxchg. I
guess that idea didn't work out?
</pre>
</blockquote>
<p>cmpxchg cannot be used for the 52-bit fault->key, because it shares
the same uint64_t with the 8-bit fault->next, so taking its address is a
compilation error:<br>
</p>
<pre>/home/yangp/git/compute_staging/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c:435:22:
error: cannot take address of bit-field ‘key’
        if (atomic_cmpxchg(&fault->key, key, 0) == key)</pre>
<p>On Vega20 the interrupt handler and the deferred work both access the
fault hash table and the fault ring, not just fault->key, so I decided
to use a spinlock.<br>
</p>
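<p>To make the problem concrete, here is a kernel-style sketch (assumed
field widths and a hypothetical fault_key_clear() helper, not the actual
amdgpu_gmc_fault definition): because a bit-field has no address, a
lockless variant would have to cmpxchg the whole 64-bit word that holds
both key and next, and the read of next before the exchange is still
racy, which is part of why a spinlock looked simpler here.<br>
</p>
<pre>/* Illustration only: assumed widths, mirroring the fields discussed above */
struct fault_entry {
        uint64_t timestamp;
        uint64_t key  : 52;     /* fault_key(addr, pasid) */
        uint64_t next : 8;      /* index of the previous entry with the same hash */
};

static bool fault_key_clear(struct fault_entry *fault, uint64_t key)
{
        /*
         * atomic_cmpxchg(&fault->key, key, 0) does not compile: "cannot take
         * address of bit-field 'key'".  Operate on the containing word instead
         * (assumes the usual little-endian bit-field allocation).
         */
        uint64_t *word = (uint64_t *)fault + 1; /* word holding key:52 + next:8 */
        uint64_t old = ((uint64_t)fault->next << 52) | key;
        uint64_t new_word = (uint64_t)fault->next << 52;

        /* Still racy if next changes between the read above and the exchange */
        return cmpxchg(word, old, new_word) == old;
}</pre>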
<blockquote type="cite" cite="mid:40afb92c-cdb1-a20b-0976-a7e783462e62@amd.com">
<pre class="moz-quote-pre" wrap="">
</pre>
<blockquote type="cite">
<pre class="moz-quote-pre" wrap="">
Signed-off-by: Philip Yang <a class="moz-txt-link-rfc2396E" href="mailto:Philip.Yang@amd.com"><Philip.Yang@amd.com></a>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 66 +++++++++++++++++++++++--
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 3 ++
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 1 +
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 +
4 files changed, 68 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index c39ed9eb0987..801ea0623453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -332,6 +332,17 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
+/**
+ * fault_key - get 52bit hash key from vm fault address and pasid
+ *
+ * @addr: 48bit physical address
+ * @pasid: 4 bit
+ */
+static inline uint64_t fault_key(uint64_t addr, uint16_t pasid)
+{
+ return addr << 4 | pasid;
+}
+
/**
* amdgpu_gmc_filter_faults - filter VM faults
*
@@ -349,15 +360,20 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
{
struct amdgpu_gmc *gmc = &adev->gmc;
- uint64_t stamp, key = addr << 4 | pasid;
+ uint64_t stamp, key = fault_key(addr, pasid);
struct amdgpu_gmc_fault *fault;
+ unsigned long flags;
uint32_t hash;
/* If we don't have space left in the ring buffer return immediately */
stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
AMDGPU_GMC_FAULT_TIMEOUT;
- if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
+
+ spin_lock_irqsave(&gmc->fault_lock, flags);
+ if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp) {
+ spin_unlock_irqrestore(&gmc->fault_lock, flags);
return true;
+ }
/* Try to find the fault in the hash */
hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
@@ -365,8 +381,10 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
while (fault->timestamp >= stamp) {
uint64_t tmp;
- if (fault->key == key)
+ if (fault->key == key) {
+ spin_unlock_irqrestore(&gmc->fault_lock, flags);
return true;
+ }
tmp = fault->timestamp;
fault = &gmc->fault_ring[fault->next];
@@ -384,9 +402,51 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
/* And update the hash */
fault->next = gmc->fault_hash[hash].idx;
gmc->fault_hash[hash].idx = gmc->last_fault++;
+ spin_unlock_irqrestore(&gmc->fault_lock, flags);
return false;
}
+/**
+ * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
+ *
+ * @adev: amdgpu device structure
+ * @addr: address of the VM fault
+ * @pasid: PASID of the process causing the fault
+ *
+ * Remove the address from fault filter, then future vm fault on this address
+ * will pass to retry fault handler to recover.
+ */
+void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+
+ uint64_t key = fault_key(addr, pasid);
+ struct amdgpu_gmc_fault *fault;
+ unsigned long flags;
+ uint32_t hash;
+
+ spin_lock_irqsave(&gmc->fault_lock, flags);
+ hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
+ fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
+ while (true) {
+ uint64_t tmp;
+
+ if (fault->key == key) {
+ fault->timestamp = 0;
</pre>
</blockquote>
<pre class="moz-quote-pre" wrap="">
Setting the timestamp to 0 breaks the chain of interrupts with the same
hash. As you can see in amdgpu_gmc_filter_faults, it uses a closed hash
algorithm that looks for the entry with the correct key until it hits a
time stamp that's too old. So resetting the timestamp will break that
chain and effectively remove all entries with the same hash that have
older timestamps than the one you intended to remove.
I suggested invalidating the fault->key instead, leaving the timestamp
alone. This would effectively remove the entry from the hash chain but
would still allow amdgpu_gmc_filter_faults to find older entries with the
same hash.</pre>
</blockquote>
<p>Thanks, I understand how the closed hash table and fault ring work
now.</p>
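<p>For reference, a minimal sketch of how the removal loop could look
with that suggestion applied to the code above (assuming 0 can serve as
an invalid key; the actual v3 may differ):<br>
</p>
<pre>void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
                                     uint16_t pasid)
{
        struct amdgpu_gmc *gmc = &adev->gmc;
        uint64_t key = fault_key(addr, pasid);
        struct amdgpu_gmc_fault *fault;
        unsigned long flags;
        uint32_t hash;

        spin_lock_irqsave(&gmc->fault_lock, flags);
        hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
        fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
        while (true) {
                uint64_t tmp;

                if (fault->key == key) {
                        /*
                         * Invalidate only the key; keep the timestamp so that
                         * amdgpu_gmc_filter_faults can still walk past this
                         * entry to older entries with the same hash.
                         */
                        fault->key = 0;
                        break;
                }

                tmp = fault->timestamp;
                fault = &gmc->fault_ring[fault->next];

                /* Check if the entry was reused */
                if (fault->timestamp >= tmp)
                        break;
        }
        spin_unlock_irqrestore(&gmc->fault_lock, flags);
}</pre>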
<p>Will send out v3 shortly.</p>
<p>Thanks,</p>
<p>Philip<br>
</p>
<blockquote type="cite" cite="mid:40afb92c-cdb1-a20b-0976-a7e783462e62@amd.com">
<pre class="moz-quote-pre" wrap="">
Regards,
Felix
</pre>
<blockquote type="cite">
<pre class="moz-quote-pre" wrap="">+ break;
+ }
+
+ tmp = fault->timestamp;
+ fault = &gmc->fault_ring[fault->next];
+
+ /* Check if the entry was reused */
+ if (fault->timestamp >= tmp)
+ break;
+ }
+ spin_unlock_irqrestore(&gmc->fault_lock, flags);
+}
+
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 9d11c02a3938..0aae3bd01bf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -246,6 +246,7 @@ struct amdgpu_gmc {
uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+ spinlock_t fault_lock;
bool tmz_enabled;
@@ -318,6 +319,8 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
+void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 498b28a35f5b..7416ad874652 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -839,6 +839,7 @@ static int gmc_v10_0_sw_init(void *handle)
adev->mmhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
+ spin_lock_init(&adev->gmc.fault_lock);
if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 4da8b3d28af2..3290b259a372 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1444,6 +1444,7 @@ static int gmc_v9_0_sw_init(void *handle)
adev->mmhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
+ spin_lock_init(&adev->gmc.fault_lock);
r = amdgpu_atomfirmware_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
</pre>
</blockquote>
</blockquote>
</body>
</html>