[RFC 7/7] drm/amdgpu: Secure semaphore for usermode queue

Zhang, Yifan Yifan1.Zhang at amd.com
Sun Dec 25 10:07:07 UTC 2022


[Public]

From: Arunpravin Paneer Selvam <arunpravin.paneerselvam at amd.com>

This is a WIP patch, which adds a kernel implementation of secure semaphores for the usermode queues. The UAPI for the same is yet to be implemented.

The idea is to create a RO page and map it to each process requesting a user mode queue, and give them a unique offset in the page, which can be polled (like wait_mem) for sync.

Cc: Alex Deucher <alexander.deucher at amd.com>
Cc: Christian Koenig <christian.koenig at amd.com>
Cc: Shashank Sharma <shashank.sharma at amd.com>

Signed-off-by: Arunpravin Paneer Selvam <arunpravin.paneerselvam at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile           |   1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu.h           |   8 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c |   7 +-
 .../amd/amdgpu/amdgpu_userqueue_secure_sem.c  | 245 ++++++++++++++++++
 .../drm/amd/include/amdgpu_usermode_queue.h   |  10 +
 .../amd/include/amdgpu_usermode_queue_mqd.h   |   4 +-
 6 files changed, 272 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_secure_sem.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index e2a34ee57bfb..daec7bb9ab3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -211,6 +211,7 @@ amdgpu-y += amdgpu_amdkfd.o
 
 # add usermode queue
 amdgpu-y += amdgpu_userqueue.o
+amdgpu-y += amdgpu_userqueue_secure_sem.o
 
 ifneq ($(CONFIG_HSA_AMD),)
 AMDKFD_PATH := ../amdkfd
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4b566fcfca18..7325c01efc90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -749,9 +749,17 @@ struct amdgpu_mqd {
 			struct amdgpu_mqd_prop *p);
 };
 
+/*
+ * Global pool of secure semaphore slots shared by all usermode queues.
+ * The slots live in a single read-only page that gets mapped into the
+ * VM of every process requesting a user mode queue.
+ */
+struct amdgpu_userq_sec_sem {
+	struct amdgpu_bo *sem_obj;	/* kernel BO backing the semaphore page */
+	u64 gpu_addr;			/* GPU address of the kernel mapping of sem_obj */
+	u32 num_sem;			/* number of 64-bit semaphore slots */
+	/* slot allocation bitmap; 64 == AMDGPU_MAX_USERQUEUE_SEC_SEM, keep in sync */
+	unsigned long used[DIV_ROUND_UP(64, BITS_PER_LONG)];
+};
+
 struct amdgpu_userq_globals {
 	struct ida ida;
 	struct mutex userq_mutex;
+	struct amdgpu_userq_sec_sem sem;
 };
 
 #define AMDGPU_RESET_MAGIC_NUM 64
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index b164e24247ca..2af634bbe3dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -261,6 +261,10 @@ amdgpu_userqueue_setup_mqd(struct amdgpu_device *adev, struct amdgpu_usermode_qu
     /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
     mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
 
+    /* Setup semaphore fence address */
+    mqd->fenceaddress_lo = queue->sem_data.sem_gpu_addr & 0xFFFFFFFC;
+    mqd->fenceaddress_high = upper_32_bits(queue->sem_data.sem_gpu_addr) & 0xFFFF;
+

(Review note: the patch as posted assigned upper_32_bits() to mqd->fenceaddress_lo a second time, overwriting the low bits; the second store must target mqd->fenceaddress_high, as corrected above.)

     /* activate the queue */
     mqd->cp_gfx_hqd_active = 1;
 }
@@ -472,10 +476,11 @@ int amdgpu_userqueue_init(struct amdgpu_device *adev)
     struct amdgpu_userq_globals *uqg = &adev->userq;
 
     mutex_init(&uqg->userq_mutex);
+    amdgpu_userqueue_sec_sem_init(adev);
     return 0;
 }
 
 void amdgpu_userqueue_fini(struct amdgpu_device *adev)
 {
-
+    amdgpu_userqueue_sec_sem_fini(adev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_secure_sem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_secure_sem.c
new file mode 100644
index 000000000000..6e6a7d62a300
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue_secure_sem.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_mes.h"
+#include "amdgpu_usermode_queue.h"
+#include "amdgpu_usermode_queue_mqd.h"
+
+/*
+ * Unmap the global semaphore BO from @q's VM and delete its bo_va.
+ *
+ * Reserves the semaphore BO together with the VM page directory,
+ * re-attaches the BO's bookkeep fences to it before the mapping is
+ * torn down, then clears the freed page-table entries and waits for
+ * that clear to finish.  Returns 0 on success or a negative errno.
+ */
+static int amdgpu_userqueue_sem_addr_unmap(struct amdgpu_device *adev,
+					   struct amdgpu_usermode_queue *q) {
+    struct amdgpu_userq_sec_sem_data *sem_bo_data = &q->sem_data;
+    struct amdgpu_bo_va *bo_va = sem_bo_data->sem_data_va;
+    struct amdgpu_vm *vm = bo_va->base.vm;
+    struct amdgpu_bo *bo = adev->userq.sem.sem_obj;
+    struct amdgpu_bo_list_entry vm_pd;
+    struct list_head list, duplicates;
+    struct dma_fence *fence = NULL;
+    struct ttm_validate_buffer tv;
+    struct ww_acquire_ctx ticket;
+    long r = 0;
+
+    INIT_LIST_HEAD(&list);
+    INIT_LIST_HEAD(&duplicates);
+
+    /* validation entry for the semaphore BO itself */
+    tv.bo = &bo->tbo;
+    tv.num_shared = 2;
+    list_add(&tv.head, &list);
+
+    /* add the VM page directory so it is reserved along with the BO */
+    amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+    r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+    if (r) {
+        DRM_ERROR("leaking bo va because we fail to reserve bo (%ld)\n", r);
+        return r;
+    }
+
+    amdgpu_vm_bo_del(adev, bo_va);
+    if (!amdgpu_vm_ready(vm))
+        goto out_unlock;
+
+    /* collapse the BO's bookkeep fences into one and pin it back on the BO */
+    r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+    if (r)
+        goto out_unlock;
+    if (fence) {
+        amdgpu_bo_fence(bo, fence, true);
+        fence = NULL;
+    }
+
+    /* clear the page-table entries freed by the bo_del above */
+    r = amdgpu_vm_clear_freed(adev, vm, &fence);
+    if (r || !fence)
+        goto out_unlock;
+
+    /* wait for the page-table clear before dropping the reservation */
+    dma_fence_wait(fence, false);
+    amdgpu_bo_fence(bo, fence, true);
+    dma_fence_put(fence);
+
+out_unlock:
+    if (unlikely(r < 0))
+        DRM_ERROR("failed to clear page tables (%ld)\n", r);
+    ttm_eu_backoff_reservation(&ticket, &list);
+
+    return r;
+}
+
+/*
+ * Pick the GPU VA at which the semaphore page is mapped into a VM.
+ * Placeholder: currently hands back the reserved-VA size until a real
+ * per-VM address allocator is wired up.
+ */
+static u64 amdgpu_sem_bo_vaddr(struct amdgpu_device *adev)
+{
+       /* TODO: find a proper VA address for the sem BO mapping */
+       return AMDGPU_VA_RESERVED_SIZE;
+}
+
+/*
+ * Map the global semaphore BO into @q's VM at a read-only address and
+ * record the resulting bo_va and GPU VA in the queue's sem_data.
+ * Waits for the page-table updates to land before returning.
+ * Returns 0 on success or a negative errno.
+ */
+static int amdgpu_userqueue_sem_addr_map(struct amdgpu_device *adev,
+                                         struct amdgpu_usermode_queue *q)
+{
+    struct amdgpu_userq_sec_sem_data *sem_bo_data = &q->sem_data;
+    struct amdgpu_bo *sem_obj = adev->userq.sem.sem_obj;
+    struct ttm_validate_buffer csa_tv;
+    struct amdgpu_bo_list_entry pd;
+    struct ww_acquire_ctx ticket;
+    struct amdgpu_vm *vm = q->vm;
+    struct amdgpu_bo_va *bo_va;
+    struct amdgpu_sync sync;
+    struct list_head list;
+    int r;
+
+    amdgpu_sync_create(&sync);
+    INIT_LIST_HEAD(&list);
+    INIT_LIST_HEAD(&csa_tv.head);
+
+    csa_tv.bo = &sem_obj->tbo;
+    csa_tv.num_shared = 1;
+
+    list_add(&csa_tv.head, &list);
+    /* reserve the semaphore BO together with the VM page directory */
+    amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+    r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+    if (r) {
+        DRM_ERROR("failed to reserve userqueue sec sem object BO: err=%d\n", r);
+        return r;
+    }
+
+    bo_va = amdgpu_vm_bo_add(adev, vm, sem_obj);
+    if (!bo_va) {
+        ttm_eu_backoff_reservation(&ticket, &list);
+        DRM_ERROR("failed to create bo_va for userqueue sec sem object BO\n");
+        return -ENOMEM;
+    }
+
+    sem_bo_data->sem_gpu_addr = amdgpu_sem_bo_vaddr(adev);
+    /*
+     * The semaphore page is mapped read-only: userspace may only poll
+     * it.  (The patch as posted or'ed AMDGPU_PTE_READABLE with itself.)
+     */
+    r = amdgpu_vm_bo_map(adev, bo_va, sem_bo_data->sem_gpu_addr, 0,
+                         AMDGPU_MAX_USERQUEUE_SEC_SEM * sizeof(u64),
+                         AMDGPU_PTE_READABLE);
+    if (r) {
+        DRM_ERROR("failed to do bo_map on sec sem object BO, err=%d\n", r);
+        goto error;
+    }
+
+    r = amdgpu_vm_bo_update(adev, bo_va, false);
+    if (r) {
+        DRM_ERROR("failed to do vm_bo_update on sec sem object BO\n");
+        goto error;
+    }
+    amdgpu_sync_fence(&sync, bo_va->last_pt_update);
+
+    r = amdgpu_vm_update_pdes(adev, vm, false);
+    if (r) {
+        DRM_ERROR("failed to update pdes on sec sem object BO\n");
+        goto error;
+    }
+    amdgpu_sync_fence(&sync, vm->last_update);
+
+    /* wait for the PTE/PDE updates before exposing the mapping */
+    amdgpu_sync_wait(&sync, false);
+    ttm_eu_backoff_reservation(&ticket, &list);
+
+    amdgpu_sync_free(&sync);
+    sem_bo_data->sem_data_va = bo_va;
+    return 0;
+
+error:
+    amdgpu_vm_bo_del(adev, bo_va);
+    ttm_eu_backoff_reservation(&ticket, &list);
+    amdgpu_sync_free(&sync);
+    return r;
+}
+
+/*
+ * Allocate one semaphore slot for queue @q, map the semaphore page
+ * into the queue's VM and return the slot's GPU address via @gpu_addr.
+ * Returns 0 on success, -EINVAL when all slots are in use, or the
+ * negative errno from the mapping.
+ */
+int amdgpu_userqueue_sec_sem_get(struct amdgpu_device *adev,
+                            struct amdgpu_usermode_queue *q,
+                            u64 *gpu_addr)
+{
+    unsigned long offset = find_first_zero_bit(adev->userq.sem.used,
+                                               adev->userq.sem.num_sem);
+    u32 sem_offset;
+    int r;
+
+    if (offset >= adev->userq.sem.num_sem)
+        return -EINVAL;
+
+    __set_bit(offset, adev->userq.sem.used);
+    /*
+     * Byte offset of this slot inside the semaphore page (64-byte
+     * stride, matching the `sem >>= 6` in amdgpu_userqueue_sec_sem_free).
+     * NOTE(review): the mapping is only num_sem * sizeof(u64) bytes, so
+     * with this stride slots >= 8 fall past the mapped range — confirm
+     * whether the stride should be sizeof(u64) or the map size grown.
+     */
+    sem_offset = offset << 6;
+
+    r = amdgpu_userqueue_sem_addr_map(adev, q);
+    if (r) {
+        DRM_ERROR("failed to map sec sem object BO");
+        /*
+         * Give the slot back.  Do NOT call sem_addr_unmap() here: on a
+         * map failure q->sem_data.sem_data_va was never assigned, and
+         * the map error path already deleted its bo_va.
+         */
+        __clear_bit(offset, adev->userq.sem.used);
+        return r;
+    }
+
+    *gpu_addr = sem_offset + q->sem_data.sem_gpu_addr;
+
+    return 0;
+}
+
+/*
+ * Release semaphore slot @sem (the byte offset handed out by
+ * amdgpu_userqueue_sec_sem_get) and unmap the semaphore page from the
+ * queue's VM.
+ */
+void amdgpu_userqueue_sec_sem_free(struct amdgpu_device *adev,
+                                struct amdgpu_usermode_queue *q,
+                                u32 sem)
+{
+    int r;
+
+    r = amdgpu_userqueue_sem_addr_unmap(adev, q);
+    if (r)
+        DRM_ERROR("failed to unmap sec sem object BO");
+
+    sem >>= 6; /* byte offset back to slot index; matches the << 6 in _get() */
+    if (sem < adev->userq.sem.num_sem)
+        __clear_bit(sem, adev->userq.sem.used);
+}
+
+/*
+ * One-time setup of the global secure-semaphore page: create a kernel
+ * GTT BO large enough for AMDGPU_MAX_USERQUEUE_SEC_SEM 64-bit slots
+ * and reset the allocation bitmap.  Idempotent: a second call with the
+ * BO already created is a no-op.  Returns 0 or a negative errno.
+ */
+int
+amdgpu_userqueue_sec_sem_init(struct amdgpu_device *adev)
+{
+    int r;
+
+    if (adev->userq.sem.sem_obj == NULL) {
+        /*
+         * AMDGPU_MAX_USERQUEUE_SEC_SEM 64-bit slots, i.e.
+         * AMDGPU_MAX_USERQUEUE_SEC_SEM * sizeof(u64) bytes.
+         */
+        r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_USERQUEUE_SEC_SEM * sizeof(u64),
+                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->userq.sem.sem_obj,
+                                    &adev->userq.sem.gpu_addr, NULL);
+        if (r) {
+            DRM_ERROR("Create userqueue SEM bo failed, err %d\n", r);
+            return r;
+        }
+
+        adev->userq.sem.num_sem = AMDGPU_MAX_USERQUEUE_SEC_SEM;
+        /* pass the array directly; &array has the same address but wrong type */
+        memset(adev->userq.sem.used, 0, sizeof(adev->userq.sem.used));
+    }
+
+    return 0;
+}
+
+/* Tear down the global secure-semaphore page created by _init(). */
+void
+amdgpu_userqueue_sec_sem_fini(struct amdgpu_device *adev)
+{
+    struct amdgpu_userq_sec_sem *sem = &adev->userq.sem;
+
+    if (!sem->sem_obj)
+        return;
+
+    amdgpu_bo_free_kernel(&sem->sem_obj, &sem->gpu_addr, NULL);
+    sem->sem_obj = NULL;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/amdgpu_usermode_queue.h b/drivers/gpu/drm/amd/include/amdgpu_usermode_queue.h
index 8bf3c0be6937..630d9b5d2423 100644
--- a/drivers/gpu/drm/amd/include/amdgpu_usermode_queue.h
+++ b/drivers/gpu/drm/amd/include/amdgpu_usermode_queue.h
@@ -25,6 +25,12 @@
 #define AMDGPU_USERMODE_QUEUE_H_
 
 #define AMDGPU_MAX_USERQ 512
+#define AMDGPU_MAX_USERQUEUE_SEC_SEM 64
+
+/* Per-queue state for the shared secure-semaphore page mapping */
+struct amdgpu_userq_sec_sem_data {
+	u64 sem_gpu_addr;	/* GPU VA of the semaphore page in this queue's VM */
+	struct amdgpu_bo_va *sem_data_va;	/* bo_va of that mapping; set on successful map */
+};
 
 struct amdgpu_userq_ctx {
 	struct amdgpu_bo *obj;
@@ -52,7 +58,11 @@ struct amdgpu_usermode_queue {
 	struct amdgpu_vm    	*vm;
 	struct amdgpu_userq_ctx proc_ctx;
 	struct amdgpu_userq_ctx gang_ctx;
+
+	struct amdgpu_userq_sec_sem_data sem_data;
 	struct list_head 	list;
 };
 
+int amdgpu_userqueue_sec_sem_init(struct amdgpu_device *adev);
+void amdgpu_userqueue_sec_sem_fini(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/include/amdgpu_usermode_queue_mqd.h b/drivers/gpu/drm/amd/include/amdgpu_usermode_queue_mqd.h
index d0a285708ba5..e0bfb67d91f4 100644
--- a/drivers/gpu/drm/amd/include/amdgpu_usermode_queue_mqd.h
+++ b/drivers/gpu/drm/amd/include/amdgpu_usermode_queue_mqd.h
@@ -35,8 +35,8 @@ struct amdgpu_usermode_queue_mqd
 	uint32_t fw_work_area_base_hi; // offset: 5  (0x5)
 	uint32_t shadow_initialized; // offset: 6  (0x6)
 	uint32_t ib_vmid; // offset: 7  (0x7)
-	uint32_t reserved_8; // offset: 8  (0x8)
-	uint32_t reserved_9; // offset: 9  (0x9)
+	uint32_t fenceaddress_lo; // offset: 8  (0x8)
+	uint32_t fenceaddress_high; // offset: 9  (0x9)
 	uint32_t reserved_10; // offset: 10  (0xA)
 	uint32_t reserved_11; // offset: 11  (0xB)
 	uint32_t reserved_12; // offset: 12  (0xC)
--
2.34.1


More information about the amd-gfx mailing list