[PATCH v4 01/10] mm: add zone device coherent type memory support
Alistair Popple
apopple at nvidia.com
Fri Jan 28 01:57:40 UTC 2022
On Thursday, 27 January 2022 2:09:40 PM AEDT Alex Sierra wrote:
[...]
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 1852d787e6ab..277562cd4cf5 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -362,7 +362,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
> * Device private pages have an extra refcount as they are
> * ZONE_DEVICE pages.
> */
>
> - expected_count += is_device_private_page(page);
> + expected_count += is_dev_private_or_coherent_page(page);
>
> 	if (mapping)
> 		expected_count += thp_nr_pages(page) + page_has_private(page);
>
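(For anyone following along: as I read the memremap.h changes earlier in
this patch, the new helper is roughly the obvious extension of
is_device_private_page() - a sketch only, the exact definitions are in
the patch itself:

	static inline bool is_device_coherent_page(const struct page *page)
	{
		/* Coherent pages are ZONE_DEVICE pages whose pgmap has the
		 * new MEMORY_DEVICE_COHERENT type added by this series. */
		return is_zone_device_page(page) &&
			page->pgmap->type == MEMORY_DEVICE_COHERENT;
	}

	static inline bool is_dev_private_or_coherent_page(const struct page *page)
	{
		return is_device_private_page(page) ||
			is_device_coherent_page(page);
	}
)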
> @@ -2503,7 +2503,7 @@ static bool migrate_vma_check_page(struct page *page)
>
> * FIXME proper solution is to rework migration_entry_wait() so
> * it does not need to take a reference on page.
> */
>
> - return is_device_private_page(page);
> + return is_dev_private_or_coherent_page(page);
As Andrew points out, this hunk no longer applies due to changes here. I think
you can just drop it though.
[...]
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 6aebd1747251..32dae6839403 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1823,10 +1823,17 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
> * pteval maps a zone device page and is therefore
> * a swap pte.
> */
> - if (pte_swp_soft_dirty(pteval))
> - swp_pte = pte_swp_mksoft_dirty(swp_pte);
> - if (pte_swp_uffd_wp(pteval))
> - swp_pte = pte_swp_mkuffd_wp(swp_pte);
> + if (is_device_coherent_page(page)) {
> + if (pte_soft_dirty(pteval))
> + swp_pte = pte_swp_mksoft_dirty(swp_pte);
> + if (pte_uffd_wp(pteval))
> + swp_pte = pte_swp_mkuffd_wp(swp_pte);
> + } else {
> + if (pte_swp_soft_dirty(pteval))
> + swp_pte = pte_swp_mksoft_dirty(swp_pte);
> + if (pte_swp_uffd_wp(pteval))
> + swp_pte = pte_swp_mkuffd_wp(swp_pte);
> + }
As I understand things, PTEs for device coherent pages don't need special
treatment: unlike device private pages they are mapped with ordinary present
PTEs, so rather than special casing them here they should just fall through to
the same path as normal pages. For that I think all you need is something
like:
- if (is_zone_device_page(page)) {
+ if (is_device_private_page(page)) {
Note that device private pages are the only zone device pages that could have
been encountered here anyway.
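In other words the branch would end up looking something like this (a sketch
against my reading of the current try_to_migrate_one(), not the exact code):

	if (is_device_private_page(page)) {
		/* pteval is a device private swap pte, so keep building
		 * the migration entry with the pte_swp_*() accessors,
		 * exactly as the existing code does. */
		/* ... existing device private handling ... */
	} else if (PageHWPoison(page)) {
		/* ... existing hwpoison handling ... */
	} else {
		/* Normal pages and, with this change, device coherent
		 * pages: pteval is an ordinary present pte here, so the
		 * existing pte_soft_dirty()/pte_uffd_wp() handling is
		 * already correct and no special casing is needed. */
		/* ... existing normal page handling ... */
	}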
> set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
> /*
> * No need to invalidate here it will synchronize on
> @@ -1837,7 +1844,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
> * Since only PAGE_SIZE pages can currently be
> * migrated, just set it to page. This will need to be
> * changed when hugepage migrations to device private
> - * memory are supported.
> + * or coherent memory are supported.
> */
> subpage = page;
> } else if (PageHWPoison(page)) {
> @@ -1943,7 +1950,8 @@ void try_to_migrate(struct page *page, enum ttu_flags flags)
> TTU_SYNC)))
> return;
>
> - if (is_zone_device_page(page) && !is_device_private_page(page))
> + if (is_zone_device_page(page) &&
> + !is_dev_private_or_coherent_page(page))
> return;
>
> /*
>