[bug report] drm/xe: Introduce a new DRM driver for Intel GPUs

Dan Carpenter <dan.carpenter@linaro.org>
Sat Jun 8 14:23:28 UTC 2024


Hello Matthew Brost,

Commit dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel
GPUs") from Mar 30, 2023 (linux-next), leads to the following Smatch
static checker warning:

	drivers/gpu/drm/xe/xe_pt.c:671 xe_pt_stage_bind()
	error: we previously assumed 'bo' could be null (see line 602)
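
For reference, the pattern the checker flags boils down to code that
tests a pointer for NULL in one place and then dereferences it
unconditionally later on, e.g. (a contrived sketch, not the driver
code; foo, do_something() and use() are made-up names):

	static void example(struct foo *p)
	{
		if (p && p->flag)	/* 'p' treated as possibly NULL */
			do_something(p);

		use(p->field);		/* 'p' dereferenced unconditionally */
	}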

drivers/gpu/drm/xe/xe_pt.c
    596 static int
    597 xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
    598                  struct xe_vm_pgtable_update *entries, u32 *num_entries)
    599 {
    600         struct xe_device *xe = tile_to_xe(tile);
    601         struct xe_bo *bo = xe_vma_bo(vma);
    602         bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
    603                 (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
    604         struct xe_res_cursor curs;
    605         struct xe_pt_stage_bind_walk xe_walk = {
    606                 .base = {
    607                         .ops = &xe_pt_stage_bind_ops,
    608                         .shifts = xe_normal_pt_shifts,
    609                         .max_level = XE_PT_HIGHEST_LEVEL,
    610                 },
    611                 .vm = xe_vma_vm(vma),
    612                 .tile = tile,
    613                 .curs = &curs,
    614                 .va_curs_start = xe_vma_start(vma),
    615                 .vma = vma,
    616                 .wupd.entries = entries,
    617                 .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
    618         };
    619         struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
    620         int ret;
    621 
    622         /**
    623          * Default atomic expectations for different allocation scenarios are as follows:
    624          *
    625          * 1. Traditional API: When the VM is not in LR mode:
    626          *    - Device atomics are expected to function with all allocations.
    627          *
    628          * 2. Compute/SVM API: When the VM is in LR mode:
    629          *    - Device atomics are the default behavior when the bo is placed in a single region.
    630          *    - In all other cases device atomics will be disabled with AE=0 until an application
    631          *      request differently using a ioctl like madvise.
    632          */
    633         if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
    634                 if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
    635                         if (bo && xe_bo_has_single_placement(bo))
                                    ^^
Checks for NULL

    636                                 xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
    637                         /**
    638                          * If a SMEM+LMEM allocation is backed by SMEM, a device
    639                          * atomics will cause a gpu page fault and which then
    640                          * gets migrated to LMEM, bind such allocations with
    641                          * device atomics enabled.
    642                          */
    643                         else if (is_devmem && !xe_bo_has_single_placement(bo))
                                                                                  ^^

is_devmem can only be true when bo is non-NULL, so it's OK that
xe_bo_has_single_placement() doesn't check here, as spelled out below.
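
(From line 602:

	bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
		(xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));

so the && short-circuits and is_devmem stays false whenever bo is NULL.)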

    644                                 xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
    645                 } else {
    646                         xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
    647                 }
    648 
    649                 /**
    650                  * Unset AE if the platform(PVC) doesn't support it on an
    651                  * allocation
    652                  */
    653                 if (!xe->info.has_device_atomics_on_smem && !is_devmem)
    654                         xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
    655         }
    656 
    657         if (is_devmem) {
    658                 xe_walk.default_pte |= XE_PPGTT_PTE_DM;
    659                 xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
    660         }
    661 
    662         if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
    663                 xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
    664 
    665         xe_bo_assert_held(bo);
                                  ^^
This assert helper has a built-in NULL check, so the unchecked bo is
fine here; see the sketch below.
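
i.e. something along these lines (a sketch of the idea only; the exact
definition in xe_bo.h may differ):

	static inline void xe_bo_assert_held(struct xe_bo *bo)
	{
		if (bo)
			dma_resv_assert_held(bo->ttm.base.resv);
	}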

    666 
    667         if (!xe_vma_is_null(vma)) {
    668                 if (xe_vma_is_userptr(vma))
    669                         xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
    670                                         xe_vma_size(vma), &curs);
--> 671                 else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
                                               ^^                     ^^
Here 'bo' is dereferenced without a NULL check.

    672                         xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
    673                                      xe_vma_size(vma), &curs);
    674                 else
    675                         xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
                                                         ^^
More unchecked dereferences

    676                                         xe_vma_size(vma), &curs);
    677         } else {
    678                 curs.size = xe_vma_size(vma);
    679         }
    680 
    681         ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
    682                                xe_vma_end(vma), &xe_walk.base);
    683 
    684         *num_entries = xe_walk.wupd.num_used_entries;
    685         return ret;
    686 }
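
If 'bo' really is guaranteed to be non-NULL whenever the VMA is neither
a NULL VMA nor a userptr VMA, one way to resolve the warning is to
state that invariant explicitly instead of half-checking it (untested
sketch; whether xe_assert() is the right idiom here is for the
maintainers to decide):

	if (!xe_vma_is_null(vma)) {
		if (xe_vma_is_userptr(vma)) {
			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
					xe_vma_size(vma), &curs);
		} else {
			xe_assert(xe, bo); /* non-userptr VMAs must have a bo */
			if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
				xe_res_first(bo->ttm.resource,
					     xe_vma_bo_offset(vma),
					     xe_vma_size(vma), &curs);
			else
				xe_res_first_sg(xe_bo_sg(bo),
						xe_vma_bo_offset(vma),
						xe_vma_size(vma), &curs);
		}
	} else {
		curs.size = xe_vma_size(vma);
	}

Alternatively, if 'bo' can be NULL on this path, then the VRAM/stolen
check at line 671 needs the same NULL test that line 602 already does.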

regards,
dan carpenter

