[PATCH 2/3] drm/xe/uc: Use managed bo for HuC and GSC objects
Lucas De Marchi
lucas.demarchi at intel.com
Thu Aug 15 20:00:40 UTC 2024
On Fri, Aug 09, 2024 at 04:12:36PM GMT, Daniele Ceraolo Spurio wrote:
>Drmm actions are not the right ones to clean up BOs and we should use
>devm instead. However, we can instead just allocate the objects using
>the managed_bo function, which internally registers the correct cleanup
>call and therefore allows us to simplify the code.
>
>While at it, switch to drmm_kzalloc for the GSC proxy allocation to
>further simplify the cleanup.
>
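For reference, and in case anyone wonders where the cleanup ends up: if
I remember xe_bo.c correctly, the managed variant is roughly the same
create + action-registration that each caller open-codes today, just
kept in one place. Rough sketch from memory, not the exact code, and
the internal callback name is a guess:

	struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe,
						   struct xe_tile *tile,
						   size_t size, u32 flags)
	{
		struct xe_bo *bo;
		int ret;

		/* same creation path the callers were using directly */
		bo = xe_bo_create_pin_map(xe, tile, NULL, size,
					  ttm_bo_type_kernel, flags);
		if (IS_ERR(bo))
			return bo;

		/* cleanup registered here instead of in every caller */
		ret = drmm_add_action_or_reset(&xe->drm,
					       __xe_bo_unpin_map_no_vm, bo);
		if (ret)
			return ERR_PTR(ret);

		return bo;
	}

So if the drmm vs devm question needs revisiting later, it only needs
to be revisited in that one helper rather than in every user.
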
>Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
>Cc: John Harrison <John.C.Harrison at Intel.com>
>Cc: Alan Previn <alan.previn.teres.alexis at intel.com>
>---
> drivers/gpu/drm/xe/xe_gsc.c | 12 +++--------
> drivers/gpu/drm/xe/xe_gsc_proxy.c | 36 ++++++-------------------------
> drivers/gpu/drm/xe/xe_huc.c | 19 +++++-----------
> 3 files changed, 14 insertions(+), 53 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
>index 66963e26c574..b84f2e020c8c 100644
>--- a/drivers/gpu/drm/xe/xe_gsc.c
>+++ b/drivers/gpu/drm/xe/xe_gsc.c
>@@ -450,11 +450,6 @@ static void free_resources(struct drm_device *drm, void *arg)
> xe_exec_queue_put(gsc->q);
> gsc->q = NULL;
> }
>-
>- if (gsc->private) {
>- xe_bo_unpin_map_no_vm(gsc->private);
>- gsc->private = NULL;
>- }
> }
>
> int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
>@@ -474,10 +469,9 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
> if (!hwe)
> return -ENODEV;
>
>- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
>- ttm_bo_type_kernel,
>- XE_BO_FLAG_STOLEN |
>- XE_BO_FLAG_GGTT);
>+ bo = xe_managed_bo_create_pin_map(xe, tile, SZ_4M,
>+ XE_BO_FLAG_STOLEN |
>+ XE_BO_FLAG_GGTT);
> if (IS_ERR(bo))
> return PTR_ERR(bo);
>
>diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
>index aa812a2bc3ed..8f880c44211d 100644
>--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
>+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
>@@ -376,27 +376,6 @@ static const struct component_ops xe_gsc_proxy_component_ops = {
> .unbind = xe_gsc_proxy_component_unbind,
> };
>
>-static void proxy_channel_free(struct drm_device *drm, void *arg)
>-{
>- struct xe_gsc *gsc = arg;
>-
>- if (!gsc->proxy.bo)
>- return;
>-
>- if (gsc->proxy.to_csme) {
>- kfree(gsc->proxy.to_csme);
>- gsc->proxy.to_csme = NULL;
>- gsc->proxy.from_csme = NULL;
>- }
>-
>- if (gsc->proxy.bo) {
>- iosys_map_clear(&gsc->proxy.to_gsc);
>- iosys_map_clear(&gsc->proxy.from_gsc);
clearing these was not important?
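If it was, a tiny drmm action could keep the clearing without bringing
back the manual unpin. Completely untested sketch, and the helper name
below is made up:

	/* hypothetical helper, not in the tree */
	static void proxy_channel_clear_maps(struct drm_device *drm, void *arg)
	{
		struct xe_gsc *gsc = arg;

		/* purely defensive: drop the pointers into the proxy bo vmap
		 * at driver teardown so nothing is left dangling */
		iosys_map_clear(&gsc->proxy.to_gsc);
		iosys_map_clear(&gsc->proxy.from_gsc);
	}

plus a drmm_add_action_or_reset(&xe->drm, proxy_channel_clear_maps, gsc)
at the end of proxy_channel_alloc(). Not sure it is worth it, though,
since by that point nothing should be looking at those maps anymore.
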
Lucas De Marchi
>- xe_bo_unpin_map_no_vm(gsc->proxy.bo);
>- gsc->proxy.bo = NULL;
>- }
>-}
>-
> static int proxy_channel_alloc(struct xe_gsc *gsc)
> {
> struct xe_gt *gt = gsc_to_gt(gsc);
>@@ -405,18 +384,15 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
> struct xe_bo *bo;
> void *csme;
>
>- csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
>+ csme = drmm_kzalloc(&xe->drm, GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
> if (!csme)
> return -ENOMEM;
>
>- bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
>- ttm_bo_type_kernel,
>- XE_BO_FLAG_SYSTEM |
>- XE_BO_FLAG_GGTT);
>- if (IS_ERR(bo)) {
>- kfree(csme);
>+ bo = xe_managed_bo_create_pin_map(xe, tile, GSC_PROXY_CHANNEL_SIZE,
>+ XE_BO_FLAG_SYSTEM |
>+ XE_BO_FLAG_GGTT);
>+ if (IS_ERR(bo))
> return PTR_ERR(bo);
>- }
>
> gsc->proxy.bo = bo;
> gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
>@@ -424,7 +400,7 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
> gsc->proxy.to_csme = csme;
> gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;
>
>- return drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
>+ return 0;
> }
>
> /**
>diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
>index bec4366e5513..f5459f97af23 100644
>--- a/drivers/gpu/drm/xe/xe_huc.c
>+++ b/drivers/gpu/drm/xe/xe_huc.c
>@@ -43,14 +43,6 @@ huc_to_guc(struct xe_huc *huc)
> return &container_of(huc, struct xe_uc, huc)->guc;
> }
>
>-static void free_gsc_pkt(struct drm_device *drm, void *arg)
>-{
>- struct xe_huc *huc = arg;
>-
>- xe_bo_unpin_map_no_vm(huc->gsc_pkt);
>- huc->gsc_pkt = NULL;
>-}
>-
> #define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
> static int huc_alloc_gsc_pkt(struct xe_huc *huc)
> {
>@@ -59,17 +51,16 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
> struct xe_bo *bo;
>
> /* we use a single object for both input and output */
>- bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
>- PXP43_HUC_AUTH_INOUT_SIZE * 2,
>- ttm_bo_type_kernel,
>- XE_BO_FLAG_SYSTEM |
>- XE_BO_FLAG_GGTT);
>+ bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
>+ PXP43_HUC_AUTH_INOUT_SIZE * 2,
>+ XE_BO_FLAG_SYSTEM |
>+ XE_BO_FLAG_GGTT);
> if (IS_ERR(bo))
> return PTR_ERR(bo);
>
> huc->gsc_pkt = bo;
>
>- return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
>+ return 0;
> }
>
> int xe_huc_init(struct xe_huc *huc)
>--
>2.43.0
>