[PATCH v2 1/3] drm/gpusvm, drm/pagemap: Move migration functionality to drm_pagemap

kernel test robot lkp at intel.com
Wed Jun 4 15:45:11 UTC 2025


Hi Thomas,

kernel test robot noticed the following build errors:

[auto build test ERROR on drm-xe/drm-xe-next]
[also build test ERROR on next-20250604]
[cannot apply to linus/master v6.15]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Thomas-Hellstr-m/drm-gpusvm-drm-pagemap-Move-migration-functionality-to-drm_pagemap/20250604-173757
base:   https://gitlab.freedesktop.org/drm/xe/kernel.git drm-xe-next
patch link:    https://lore.kernel.org/r/20250604093536.95982-2-thomas.hellstrom%40linux.intel.com
patch subject: [PATCH v2 1/3] drm/gpusvm, drm/pagemap: Move migration functionality to drm_pagemap
config: loongarch-allyesconfig (https://download.01.org/0day-ci/archive/20250604/202506042352.xDT1ySBT-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 15.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250604/202506042352.xDT1ySBT-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp at intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202506042352.xDT1ySBT-lkp@intel.com/

All errors/warnings (new ones prefixed by >>):

   loongarch64-linux-ld: arch/loongarch/kernel/head.o: relocation R_LARCH_B26 overflow 0xffffffffef55fa70
   arch/loongarch/kernel/head.o: in function `smpboot_entry':
>> (.ref.text+0x160): relocation truncated to fit: R_LARCH_B26 against symbol `start_secondary' defined in .text section in arch/loongarch/kernel/smp.o
   loongarch64-linux-ld: final link failed: bad value
--
>> drivers/gpu/drm/drm_pagemap.c:314: warning: Function parameter or struct member 'timeslice_ms' not described in 'drm_pagemap_migrate_to_devmem'
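
(For reference, a possible kernel-doc line for the new parameter, inferred
from how it is used at line 407 of the listing below to set
devmem_allocation->timeslice_expiration, might read:

 * @timeslice_ms: Minimum time, in milliseconds, that the migrated pages are
 *                intended to remain in device memory before they may be
 *                migrated back to system memory.

This is only a sketch of the missing description; the exact wording is up to
the patch author.)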


vim +314 drivers/gpu/drm/drm_pagemap.c

   271	
   272	
   273	/**
   274	 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
   275	 * @devmem_allocation: The device memory allocation to migrate to.
   276	 * The caller should hold a reference to the device memory allocation,
   277	 * and the reference is consumed by this function unless it returns with
   278	 * an error.
   279	 * @mm: Pointer to the struct mm_struct.
   280	 * @start: Start of the virtual address range to migrate.
   281	 * @end: End of the virtual address range to migrate.
   282	 * @pgmap_owner: Not used currently, since only system memory is considered.
   283	 *
   284	 * This function migrates the specified virtual address range to device memory.
   285	 * It performs the necessary setup and invokes the driver-specific operations for
   286	 * migration to device memory. Expected to be called while holding the mmap lock in
   287	 * at least read mode.
   288	 *
   289	 * Return: %0 on success, negative error code on failure.
   290	 */
   291	
   292	/*
   293	 * @range: Pointer to the GPU SVM range structure
   294	 * @devmem_allocation: Pointer to the device memory allocation. The caller
   295	 *                     should hold a reference to the device memory allocation,
   296	 *                     which should be dropped via ops->devmem_release or upon
   297	 *                     the failure of this function.
   298	 * @ctx: GPU SVM context
   299	 *
   300	 * This function migrates the specified GPU SVM range to device memory. It
   301	 * performs the necessary setup and invokes the driver-specific operations for
   302	 * migration to device memory. Upon successful return, @devmem_allocation can
    303	 * safely reference @range until ops->devmem_release is called, which only
    304	 * happens upon successful return. Expected to be called while holding the
    305	 * mmap lock in read mode.
   306	 *
   307	 * Return: 0 on success, negative error code on failure.
   308	 */
   309	int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
   310					  struct mm_struct *mm,
   311					  unsigned long start, unsigned long end,
   312					  unsigned long timeslice_ms,
   313					  void *pgmap_owner)
 > 314	{
   315		const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
   316		struct migrate_vma migrate = {
   317			.start		= start,
   318			.end		= end,
   319			.pgmap_owner	= pgmap_owner,
   320			.flags		= MIGRATE_VMA_SELECT_SYSTEM,
   321		};
   322		unsigned long i, npages = npages_in_range(start, end);
   323		struct vm_area_struct *vas;
   324		struct drm_pagemap_zdd *zdd = NULL;
   325		struct page **pages;
   326		dma_addr_t *dma_addr;
   327		void *buf;
   328		int err;
   329	
   330		mmap_assert_locked(mm);
   331	
   332		if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
   333		    !ops->copy_to_ram)
   334			return -EOPNOTSUPP;
   335	
   336		vas = vma_lookup(mm, start);
   337		if (!vas) {
   338			err = -ENOENT;
   339			goto err_out;
   340		}
   341	
   342		if (end > vas->vm_end || start < vas->vm_start) {
   343			err = -EINVAL;
   344			goto err_out;
   345		}
   346	
   347		if (!vma_is_anonymous(vas)) {
   348			err = -EBUSY;
   349			goto err_out;
   350		}
   351	
   352		buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
   353			       sizeof(*pages), GFP_KERNEL);
   354		if (!buf) {
   355			err = -ENOMEM;
   356			goto err_out;
   357		}
   358		dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
   359		pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
   360	
   361		zdd = drm_pagemap_zdd_alloc(pgmap_owner);
   362		if (!zdd) {
   363			err = -ENOMEM;
   364			goto err_free;
   365		}
   366	
   367		migrate.vma = vas;
   368		migrate.src = buf;
   369		migrate.dst = migrate.src + npages;
   370	
   371		err = migrate_vma_setup(&migrate);
   372		if (err)
   373			goto err_free;
   374	
   375		if (!migrate.cpages) {
   376			err = -EFAULT;
   377			goto err_free;
   378		}
   379	
   380		if (migrate.cpages != npages) {
   381			err = -EBUSY;
   382			goto err_finalize;
   383		}
   384	
   385		err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
   386		if (err)
   387			goto err_finalize;
   388	
   389		err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
   390						   migrate.src, npages, DMA_TO_DEVICE);
   391		if (err)
   392			goto err_finalize;
   393	
   394		for (i = 0; i < npages; ++i) {
   395			struct page *page = pfn_to_page(migrate.dst[i]);
   396	
   397			pages[i] = page;
   398			migrate.dst[i] = migrate_pfn(migrate.dst[i]);
   399			drm_pagemap_get_devmem_page(page, zdd);
   400		}
   401	
   402		err = ops->copy_to_devmem(pages, dma_addr, npages);
   403		if (err)
   404			goto err_finalize;
   405	
   406		/* Upon success bind devmem allocation to range and zdd */
   407		devmem_allocation->timeslice_expiration = get_jiffies_64() +
   408			msecs_to_jiffies(timeslice_ms);
   409		zdd->devmem_allocation = devmem_allocation;	/* Owns ref */
   410	
   411	err_finalize:
   412		if (err)
   413			drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
   414		migrate_vma_pages(&migrate);
   415		migrate_vma_finalize(&migrate);
   416		drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
   417					       DMA_TO_DEVICE);
   418	err_free:
   419		if (zdd)
   420			drm_pagemap_zdd_put(zdd);
   421		kvfree(buf);
   422	err_out:
   423		return err;
   424	}
   425	EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
   426	
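For context, a minimal driver-side call-site sketch might look like the
following. It is not part of the patch: my_devmem, my_migrate_range and the
'base'/'owner' members are hypothetical names, and the 5 ms timeslice is an
arbitrary value. Only the drm_pagemap_migrate_to_devmem() signature, the
mmap-lock expectation and the reference rules are taken from the listing
above, and the allocation's ops are assumed to provide populate_devmem_pfn,
copy_to_devmem and copy_to_ram as checked at lines 332-334.

	struct my_devmem {			/* hypothetical driver wrapper */
		struct drm_pagemap_devmem base;	/* provides ->ops, ->dev, ... */
		void *owner;			/* pgmap owner token */
	};

	static int my_migrate_range(struct my_devmem *vram, struct mm_struct *mm,
				    unsigned long start, unsigned long end)
	{
		int err;

		/* The function expects the mmap lock held in at least read mode. */
		mmap_read_lock(mm);

		/*
		 * Per the kernel-doc above, the reference held on the devmem
		 * allocation is consumed by the call unless it returns an error.
		 */
		err = drm_pagemap_migrate_to_devmem(&vram->base, mm, start, end,
						    5 /* timeslice_ms */,
						    vram->owner);
		mmap_read_unlock(mm);

		return err;
	}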

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

