[PATCH] drm/xe/tests: fix the bo evict build break
kernel test robot
lkp at intel.com
Mon Apr 28 07:13:33 UTC 2025
Hi Dave,
kernel test robot noticed the following build errors:
[auto build test ERROR on drm-exynos/exynos-drm-next]
[also build test ERROR on drm/drm-next drm-misc/drm-misc-next drm-tip/drm-tip linus/master v6.15-rc3 next-20250424]
[cannot apply to drm-xe/drm-xe-next drm-intel/for-linux-next drm-intel/for-linux-next-fixes]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Dave-Airlie/drm-xe-tests-fix-the-bo-evict-build-break/20250428-114114
base: https://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git exynos-drm-next
patch link: https://lore.kernel.org/r/20250428034043.407486-1-airlied%40gmail.com
patch subject: [PATCH] drm/xe/tests: fix the bo evict build break
config: loongarch-randconfig-002-20250428 (https://download.01.org/0day-ci/archive/20250428/202504281424.5u93dWXu-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250428/202504281424.5u93dWXu-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202504281424.5u93dWXu-lkp@intel.com/
All errors (new ones prefixed by >>):
   In file included from drivers/gpu/drm/xe/xe_migrate.c:1723:
   drivers/gpu/drm/xe/tests/xe_migrate.c: In function 'test_migrate':
>> drivers/gpu/drm/xe/tests/xe_migrate.c:515:15: error: too few arguments to function 'xe_bo_evict'
     515 |         ret = xe_bo_evict(vram_bo);
         |               ^~~~~~~~~~~
   In file included from drivers/gpu/drm/xe/xe_migrate.c:23:
   drivers/gpu/drm/xe/xe_bo.h:274:5: note: declared here
     274 | int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
         |     ^~~~~~~~~~~
--
   In file included from drivers/gpu/drm/xe/xe_dma_buf.c:322:
   drivers/gpu/drm/xe/tests/xe_dma_buf.c: In function 'check_residency':
>> drivers/gpu/drm/xe/tests/xe_dma_buf.c:68:15: error: too few arguments to function 'xe_bo_evict'
      68 |         ret = xe_bo_evict(exported);
         |               ^~~~~~~~~~~
   In file included from drivers/gpu/drm/xe/xe_dma_buf.c:17:
   drivers/gpu/drm/xe/xe_bo.h:274:5: note: declared here
     274 | int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
         |     ^~~~~~~~~~~
--
   In file included from drivers/gpu/drm/xe/xe_bo.c:2949:
   drivers/gpu/drm/xe/tests/xe_bo.c: In function 'ccs_test_migrate':
>> drivers/gpu/drm/xe/tests/xe_bo.c:63:15: error: too few arguments to function 'xe_bo_evict'
      63 |         ret = xe_bo_evict(bo);
         |               ^~~~~~~~~~~
   drivers/gpu/drm/xe/xe_bo.c:2769:5: note: declared here
    2769 | int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
         |     ^~~~~~~~~~~
vim +/xe_bo_evict +515 drivers/gpu/drm/xe/tests/xe_migrate.c
   493  
   494  static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
   495                           struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
   496                           struct kunit *test)
   497  {
   498          struct dma_fence *fence;
   499          u64 expected, retval;
   500          long timeout;
   501          long ret;
   502  
   503          expected = 0xd0d0d0d0d0d0d0d0;
   504          xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size);
   505  
   506          fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test);
   507          if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) {
   508                  retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
   509                  if (retval == expected)
   510                          KUNIT_FAIL(test, "Sanity check failed: VRAM must have compressed value\n");
   511          }
   512          dma_fence_put(fence);
   513  
   514          kunit_info(test, "Evict vram buffer object\n");
 > 515          ret = xe_bo_evict(vram_bo);
   516          if (ret) {
   517                  KUNIT_FAIL(test, "Failed to evict bo.\n");
   518                  return;
   519          }
   520  
   521          ret = xe_bo_vmap(vram_bo);
   522          if (ret) {
   523                  KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
   524                  return;
   525          }
   526  
   527          retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
   528          check(retval, expected, "Clear evicted vram data first value", test);
   529          retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
   530          check(retval, expected, "Clear evicted vram data last value", test);
   531  
   532          fence = blt_copy(tile, vram_bo, ccs_bo,
   533                           true, "Blit surf copy from vram to sysmem", test);
   534          if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
   535                  retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
   536                  check(retval, 0, "Clear ccs data first value", test);
   537  
   538                  retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
   539                  check(retval, 0, "Clear ccs data last value", test);
   540          }
   541          dma_fence_put(fence);
   542  
   543          kunit_info(test, "Restore vram buffer object\n");
   544          ret = xe_bo_validate(vram_bo, NULL, false);
   545          if (ret) {
   546                  KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
   547                  return;
   548          }
   549  
   550          /* Sync all migration blits */
   551          timeout = dma_resv_wait_timeout(vram_bo->ttm.base.resv,
   552                                          DMA_RESV_USAGE_KERNEL,
   553                                          true,
   554                                          5 * HZ);
   555          if (timeout <= 0) {
   556                  KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
   557                  return;
   558          }
   559  
   560          ret = xe_bo_vmap(vram_bo);
   561          if (ret) {
   562                  KUNIT_FAIL(test, "Failed to vmap vram bo: %li\n", ret);
   563                  return;
   564          }
   565  
   566          retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64);
   567          check(retval, expected, "Restored value must be equal to initial value", test);
   568          retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64);
   569          check(retval, expected, "Restored value must be equal to initial value", test);
   570  
   571          fence = blt_copy(tile, vram_bo, ccs_bo,
   572                           true, "Blit surf copy from vram to sysmem", test);
   573          if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) {
   574                  retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64);
   575                  check(retval, 0, "Clear ccs data first value", test);
   576                  retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64);
   577                  check(retval, 0, "Clear ccs data last value", test);
   578          }
   579          dma_fence_put(fence);
   580  }
   581  
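For reference, the sketch below shows what the three failing call sites would look like if they were adapted to the two-argument declaration this tree carries in drivers/gpu/drm/xe/xe_bo.h, int xe_bo_evict(struct xe_bo *bo, bool force_alloc). The patch under test presumably targets trees where xe_bo_evict() takes a single argument, so this is only an illustration of the mismatch; passing true for force_alloc is an assumption made for the example, not something taken from the patch.

        /*
         * Illustration only, assuming the two-argument declaration in
         * this tree; true for force_alloc is a guess for the example,
         * not taken from the patch under test.
         */

        /* drivers/gpu/drm/xe/tests/xe_migrate.c:515, in test_migrate() */
        ret = xe_bo_evict(vram_bo, true);

        /* drivers/gpu/drm/xe/tests/xe_dma_buf.c:68, in check_residency() */
        ret = xe_bo_evict(exported, true);

        /* drivers/gpu/drm/xe/tests/xe_bo.c:63, in ccs_test_migrate() */
        ret = xe_bo_evict(bo, true);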
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki