[Intel-xe] [PATCH v2] drm/xe: use variable instead of multiple function calls

Dani Liberman dliberman at habana.ai
Tue Oct 31 21:09:45 UTC 2023


On 31/10/2023 22:09, Ruhl, Michael J wrote:




-----Original Message-----
From: Intel-xe <intel-xe-bounces at lists.freedesktop.org> On Behalf Of Dani Liberman
Sent: Tuesday, October 31, 2023 1:12 PM
To: intel-xe at lists.freedesktop.org
Subject: [Intel-xe] [PATCH v2] drm/xe: use variable instead of multiple function calls

Using function calls with negative logic was a bit confusing; positive logic is more readable.

v2:
- Update commit message

Cc: Matthew Brost <matthew.brost at intel.com>
Cc: Ville Syrjala <ville.syrjala at linux.intel.com>
Signed-off-by: Dani Liberman <dliberman at habana.ai>
Reviewed-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_exec.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 28e84a0bbeb0..2de6c2c05078 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -145,7 +145,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct xe_sched_job *job;
        struct dma_fence *rebind_fence;
        struct xe_vm *vm;
-       bool write_locked;
+       bool write_locked, vm_with_dma_fences;
        ktime_t end = 0;
        int err = 0;

@@ -196,8 +196,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                }
        }

+       vm_with_dma_fences = !xe_vm_no_dma_fences(vm);
retry:



Are you 100% certain that this value will never change on retry?

M

It depends on vm flags that are set at vm creation, so yes.
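
Concretely, the helper only reads vm->flags, which are written once at vm
create and never touched afterwards, so its result cannot change between
passes through the retry: loop. A rough sketch of what that looks like
(helper body and flag names quoted from memory, so treat them as
illustrative rather than the exact driver code):

	/* Sketch: vm->flags is set once at vm create and never modified
	 * afterwards, so this value is stable across the retry: loop.
	 * Flag names are illustrative.
	 */
	static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
	{
		return vm->flags & (XE_VM_FLAG_COMPUTE_MODE |
				    XE_VM_FLAG_FAULT_MODE);
	}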


Dani






-       if (!xe_vm_no_dma_fences(vm) && xe_vm_userptr_check_repin(vm)) {
+       if (vm_with_dma_fences && xe_vm_userptr_check_repin(vm)) {
                err = down_write_killable(&vm->lock);
                write_locked = true;
        } else {
@@ -279,7 +280,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        }

        /* Wait behind munmap style rebinds */
-       if (!xe_vm_no_dma_fences(vm)) {
+       if (vm_with_dma_fences) {
                err = drm_sched_job_add_resv_dependencies(&job->drm,
                                                          &vm->resv,
                                                          DMA_RESV_USAGE_KERNEL);
@@ -292,7 +293,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (err)
                goto err_put_job;

-       if (!xe_vm_no_dma_fences(vm)) {
+       if (vm_with_dma_fences) {
                err = down_read_interruptible(&vm->userptr.notifier_lock);
                if (err)
                        goto err_put_job;
@@ -307,7 +308,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         * the job and let the DRM scheduler / backend clean up the job.
         */
        xe_sched_job_arm(job);
-       if (!xe_vm_no_dma_fences(vm)) {
+       if (vm_with_dma_fences) {
                /* Block userptr invalidations / BO eviction */
                dma_resv_add_fence(&vm->resv,
                                   &job->drm.s_fence->finished,
@@ -330,14 +331,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        xe_sched_job_push(job);
        xe_vm_reactivate_rebind(vm);

-       if (!err && !xe_vm_no_dma_fences(vm)) {
+       if (!err && vm_with_dma_fences) {
                spin_lock(&xe->ttm.lru_lock);
                ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
                spin_unlock(&xe->ttm.lru_lock);
        }

err_repin:
-       if (!xe_vm_no_dma_fences(vm))
+       if (vm_with_dma_fences)
                up_read(&vm->userptr.notifier_lock);
err_put_job:
        if (err)
--
2.34.1




