<div class="moz-cite-prefix">On 1/6/2025 5:50 PM, Philip Yang wrote:<br>
</div>
<blockquote type="cite" cite="mid:4db6b354-56e6-3d2e-0da9-bfd1943320af@amd.com">
<p><br>
</p>
<div class="moz-cite-prefix">On 2025-01-02 19:06, Emily Deng
wrote:<br>
</div>
<blockquote type="cite" cite="mid:20250103000644.1398643-1-Emily.Deng@amd.com">
<pre class="moz-quote-pre" wrap="">For partial migrate from ram to vram, the migrate->cpages is not
equal to migrate->npages, should use migrate->npages to check all needed
migrate pages which could be copied or not.
And only need to set those pages could be migrated to migrate->dst[i], or
the migrate_vma_pages will migrate the wrong pages based on the migrate->dst[i].
Signed-off-by: Emily Deng <a class="moz-txt-link-rfc2396E" href="mailto:Emily.Deng@amd.com" moz-do-not-send="true"><Emily.Deng@amd.com></a>
---
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4b275937d05e..5c96c2d425e3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -278,7 +278,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
- uint64_t npages = migrate->cpages;
+ uint64_t npages = migrate->npages;</pre>
</blockquote>
>
> As the partial migration size is based on the prange granularity (2MB
> by default), migrate->cpages may always equal migrate->npages; that is
> probably why we never triggered this bug. How did you catch it? This
> bug will also leak svm_bo, as the svm_migrate_get_vram_page()
> reference count becomes incorrect.
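>
> To make the leak concrete, a minimal sketch (illustrative only, not
> the exact driver code): svm_migrate_get_vram_page() takes one svm_bo
> reference per dst entry, and that reference is only dropped for pages
> that actually migrate, so filling in migrate->dst[i] for an entry that
> migrate_vma_setup() never collected leaves a reference behind:
>
>         for (i = 0; i < npages; i++) {
>                 /* not collected for migration: must skip it, because
>                  * an svm_bo reference taken here would never be put
>                  */
>                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
>                         continue;
>
>                 migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
>                 svm_migrate_get_vram_page(prange, migrate->dst[i]); /* +1 svm_bo ref */
>                 migrate->dst[i] = migrate_pfn(migrate->dst[i]);
>         }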
The commit message is somewhat confusing. This is not partial migration
from page-fault recovery; it is the case where migrate->cpages !=
migrate->npages after migrate_vma_setup() because some pages cannot be
moved for various reasons, so only part of the src pages can be
migrated. For example, some system RAM pages may be locked by the
kernel. This case usually does not happen during normal tests.
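
For reference, the usual migrate_vma caller pattern (a sketch along the
lines of the HMM documentation, not the kfd code) checks each src entry
after migrate_vma_setup() instead of assuming all npages were
collected:

        int r;
        unsigned long i;

        /* migrate.vma, migrate.start/end and the src/dst arrays are
         * assumed to be set up already
         */
        r = migrate_vma_setup(&migrate);
        if (r)
                return r;

        /* cpages counts only the pages actually collected for migration */
        if (migrate.cpages != migrate.npages)
                pr_debug("partial collect: 0x%lx of 0x%lx pages\n",
                         migrate.cpages, migrate.npages);

        for (i = 0; i < migrate.npages; i++) {
                if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE)) {
                        migrate.dst[i] = 0;     /* leave uncollected pages alone */
                        continue;
                }
                /* ... allocate a device page and fill migrate.dst[i] ... */
        }

        migrate_vma_pages(&migrate);
        /* ... copy data for the entries that did migrate ... */
        migrate_vma_finalize(&migrate);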
<p><span style="white-space: pre-wrap">Regards</span></p>
<p><span style="white-space: pre-wrap">Xiaogang
</span></p>
<blockquote type="cite" cite="mid:4db6b354-56e6-3d2e-0da9-bfd1943320af@amd.com">
<p> </p>
<blockquote type="cite" cite="mid:20250103000644.1398643-1-Emily.Deng@amd.com">
<pre class="moz-quote-pre" wrap=""> struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
@@ -299,9 +299,6 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct page *spage;
dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
@@ -345,6 +342,9 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
} else {
j++;
}
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);</pre>
</blockquote>
>
> This should be moved forward, to handle the corner case of migrating
> 1 page to the last page of the VRAM res cursor.
>
> Please check this change, which adds mpages accounting to break out of
> the loop earlier:
> -	uint64_t npages = migrate->cpages;
> +	uint64_t npages = migrate->npages;
>  	struct amdgpu_device *adev = node->adev;
>  	struct device *dev = adev->dev;
>  	struct amdgpu_res_cursor cursor;
> +	uint64_t mpages = 0;
>  	dma_addr_t *src;
>  	uint64_t *dst;
>  	uint64_t i, j;
> @@ -295,14 +296,9 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
>
>  	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
>  			 npages << PAGE_SHIFT, &cursor);
> -	for (i = j = 0; i < npages; i++) {
> +	for (i = j = 0; i < npages && mpages < migrate->cpages; i++) {
>  		struct page *spage;
>
> -		dst[i] = cursor.start + (j << PAGE_SHIFT);
> -		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
> -		svm_migrate_get_vram_page(prange, migrate->dst[i]);
> -		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
> -
>  		spage = migrate_pfn_to_page(migrate->src[i]);
>  		if (spage && !is_zone_device_page(spage)) {
>  			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
> @@ -322,6 +318,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
>  						mfence);
>  				if (r)
>  					goto out_free_vram_pages;
> +				mpages += j;
>  				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
>  				j = 0;
>  			} else {
> @@ -333,6 +330,11 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
>  		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
>  				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));
>
> +		dst[i] = cursor.start + (j << PAGE_SHIFT);
> +		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
> +		svm_migrate_get_vram_page(prange, migrate->dst[i]);
> +		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
> +
>  		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
>  			r = svm_migrate_copy_memory_gart(adev, src + i - j,
>  							 dst + i - j, j + 1,
> @@ -340,6 +342,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
>  						mfence);
>  			if (r)
>  				goto out_free_vram_pages;
> +			mpages += j + 1;
>  			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
>  			j = 0;
>  		} else {
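>
> To spell out what the accounting buys us, a toy reduction (a
> hypothetical helper for illustration only, not part of the patch):
> scan up to npages entries but stop as soon as all cpages collected
> entries have been handled, which is exactly what the mpages check in
> the loop condition above does:
>
>         /* returns how many entries were visited; may be < npages */
>         static unsigned long scan_collected(const unsigned long *src,
>                                             unsigned long npages,
>                                             unsigned long cpages)
>         {
>                 unsigned long i, mpages = 0;
>
>                 for (i = 0; i < npages && mpages < cpages; i++) {
>                         if (src[i] & MIGRATE_PFN_MIGRATE)	/* collected */
>                                 mpages++;			/* account for it */
>                 }
>                 return i;
>         }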
<blockquote type="cite" cite="mid:20250103000644.1398643-1-Emily.Deng@amd.com">
<pre class="moz-quote-pre" wrap=""> }
r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
</pre>
</blockquote>
</blockquote>
</body>
</html>