[PATCH v2 3/3] drm/xe: Add CLOS specific initializations
Mishra, Pallavi
pallavi.mishra at intel.com
Fri Jan 12 18:59:12 UTC 2024
> -----Original Message-----
> From: Welty, Brian <brian.welty at intel.com>
> Sent: Thursday, January 11, 2024 5:02 PM
> To: Mishra, Pallavi <pallavi.mishra at intel.com>; intel-xe at lists.freedesktop.org
> Cc: Naklicki, Mateusz <mateusz.naklicki at intel.com>; Vishwanathapura,
> Niranjana <niranjana.vishwanathapura at intel.com>
> Subject: Re: [PATCH v2 3/3] drm/xe: Add CLOS specific initializations
>
>
>
> On 1/9/2024 3:57 PM, Pallavi Mishra wrote:
> > Handle CLOS specific initializations and PAT CLOS compatibility check.
> >
> > Signed-off-by: Pallavi Mishra <pallavi.mishra at intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_device.c | 15 ++++++++++++++
> > drivers/gpu/drm/xe/xe_pat.c | 36 ++++++++++++++++++++++++++++++++++
> > drivers/gpu/drm/xe/xe_pat.h | 9 +++++++++
> > drivers/gpu/drm/xe/xe_vm.c | 12 ++++++++++++
> > 4 files changed, 72 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index 004e65544e8d..4e3d4f2c0f9b 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -43,6 +43,7 @@
> > #include "xe_vm.h"
> > #include "xe_wait_user_fence.h"
> > #include "xe_hwmon.h"
> > +#include "xe_clos.h"
> >
> > #ifdef CONFIG_LOCKDEP
> > struct lockdep_map xe_device_mem_access_lockdep_map = {
> > @@ -81,6 +82,7 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
> > xe->clients.count++;
> > spin_unlock(&xe->clients.lock);
> >
> > + init_client_clos(xef);
> > file->driver_priv = xef;
> > return 0;
> > }
> > @@ -101,6 +103,9 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
> > xe_exec_queue_kill(q);
> > xe_exec_queue_put(q);
> > }
> > +
> > + uninit_client_clos(xef);
> > +
> > mutex_unlock(&xef->exec_queue.lock);
> > xa_destroy(&xef->exec_queue.xa);
> > mutex_destroy(&xef->exec_queue.lock);
> > @@ -138,6 +143,12 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> > DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> > DRM_RENDER_ALLOW),
> > + DRM_IOCTL_DEF_DRV(XE_CLOS_RESERVE, xe_clos_reserve_ioctl,
> > + DRM_RENDER_ALLOW),
> > + DRM_IOCTL_DEF_DRV(XE_CLOS_FREE, xe_clos_free_ioctl,
> > + DRM_RENDER_ALLOW),
> > + DRM_IOCTL_DEF_DRV(XE_CLOS_SET_WAYS, xe_clos_set_ways_ioctl,
> > + DRM_RENDER_ALLOW),
> > };
> >
> > static const struct file_operations xe_driver_fops = {
> > @@ -542,6 +553,8 @@ int xe_device_probe(struct xe_device *xe)
> >
> > xe_hwmon_register(xe);
> >
> > + init_device_clos(xe);
> > +
> > err = drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);
> > if (err)
> > return err;
> > @@ -574,6 +587,8 @@ void xe_device_remove(struct xe_device *xe)
> >
> > xe_heci_gsc_fini(xe);
> >
> > + uninit_device_clos(xe);
> > +
> > xe_irq_shutdown(xe);
> > }
> >
> > diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
> > index 1ff6bc79e7d4..ccdefba0f2f3 100644
> > --- a/drivers/gpu/drm/xe/xe_pat.c
> > +++ b/drivers/gpu/drm/xe/xe_pat.c
> > @@ -45,6 +45,14 @@
> > #define XELP_PAT_WC			REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 1)
> > #define XELP_PAT_UC			REG_FIELD_PREP(XELP_MEM_TYPE_MASK, 0)
> >
> > +#define XE2_PAT_CLOS1			((1 << 20)|(1 << 21)|(1 << 22)|(1 << 23))
> > +#define XE2_PAT_CLOS2			((1 << 24)|(1 << 25)|(1 << 26)|(1 << 27))
> > +#define XE2_PAT_CLOS3			((1 << 28)|(1 << 29)|(1 << 30)|(1 << 31))
> > +
> > +#define XEPVC_PAT_CLOS1 ((1 << 4)|(1 << 5))
> > +#define XEPVC_PAT_CLOS2 ((1 << 6)|(1 << 7))
> > +
> > +
> > static const char *XELP_MEM_TYPE_STR_MAP[] = { "UC", "WC", "WT",
> > "WB" };
> >
> > struct xe_pat_ops {
> > @@ -148,6 +156,34 @@ u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
> > return xe->pat.table[pat_index].coh_mode;
> > }
> >
> > +int xe_pat_index_clos_check(struct xe_device *xe, u16 pat_index, u16 clos_index)
> > +{
> > + WARN_ON(pat_index >= xe->pat.n_entries);
> > +
> > + int err = 0;
> > +
> > + switch (clos_index) {
> > + case 1:
> > + if (!(((1 << pat_index) & XE2_PAT_CLOS1)
> > + || (1 << pat_index & XEPVC_PAT_CLOS1)))
>
> Don't you want to test just one of the XE2 or XEPVC bitmasks, based on
> what the running platform is?
> Isn't it okay for pat_index to be outside the XEPVC range on XE2?
>
> I guess I thought we could simplify this more....
> Instead of using the defined bitmask constants here, we could store
> those values for the current platform in xe->info.
>
> So then we would only need to test something like this here:
> ((1 << pat_index) & xe->info.pat.clos_mask[clos_index])
> And we could remove the whole switch statement.
I misunderstood earlier, but I see your point now. Yes, I can pull these bitmask constants under xe->info.
The switch statement should then no longer be required. I will redo this bit and send an updated version.
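As a rough sketch of what I have in mind (the clos_mask[] array under xe->info.pat is just a placeholder name until the rework is done), the check could collapse to something like:

int xe_pat_index_clos_check(struct xe_device *xe, u16 pat_index, u16 clos_index)
{
	/* clos_mask[] would be populated per platform at init time */
	if (drm_WARN_ON(&xe->drm, pat_index >= xe->pat.n_entries))
		return -EINVAL;

	if (clos_index >= ARRAY_SIZE(xe->info.pat.clos_mask))
		return -EINVAL;

	if (!(BIT(pat_index) & xe->info.pat.clos_mask[clos_index]))
		return -EINVAL;

	return 0;
}

A zero mask for a given clos_index would then reject it on platforms that don't support it, so the XE2 vs. PVC distinction lives entirely in the init code rather than here.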
Thanks,
Pallavi
> Thoughts? Maybe we should get feedback from the maintainers on this.
>
>
>
> > + err = -EINVAL;
> > + break;
> > + case 2:
> > + if (!(((1 << pat_index) & XE2_PAT_CLOS2)
> > + || (1 << pat_index & XEPVC_PAT_CLOS2)))
> > + err = -EINVAL;
> > + break;
> > + case 3:
> > + if (!((1 << pat_index) & XE2_PAT_CLOS3))
> > + err = -EINVAL;
> > + break;
> > + default:
> > + drm_err(&xe->drm, "Unsupported CLOS value\n");
> > + err = -EINVAL;
> > + }
> > + return err;
> > +}
> > +
> > static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
> > int n_entries)
> > {
> > diff --git a/drivers/gpu/drm/xe/xe_pat.h b/drivers/gpu/drm/xe/xe_pat.h
> > index fa0dfbe525cd..afac06bc425f 100644
> > --- a/drivers/gpu/drm/xe/xe_pat.h
> > +++ b/drivers/gpu/drm/xe/xe_pat.h
> > @@ -58,4 +58,13 @@ void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p);
> > */
> > u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index);
> >
> > +/**
> > + * xe_pat_index_clos_check - check whether clos has been reserved for
> > + * chosen pat_index.
> > + * @xe: xe device
> > + * @pat_index: The pat_index to query
> > + * @clos_index: clos index to compare
> > + */
> > +int xe_pat_index_clos_check(struct xe_device *xe, u16 pat_index, u16 clos_index);
> > +
> > #endif
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 1ca917b8315c..8e8c0302c8a0 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -2800,6 +2800,18 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
> > err = -EINVAL;
> > goto free_bind_ops;
> > }
> > +
> > + /* check whether Clos has been reserved for chosen pat */
> > + if ((GRAPHICS_VER(xe) >= 20 && (pat_index > 19)) || (xe->info.platform == XE_PVC && (pat_index > 3))) {
> > + mutex_lock(&xe->cache_resv.clos_mutex);
> > + err = xe_pat_index_clos_check(xe, pat_index, xe->cache_resv.clos_index);
> > + if (err) {
> > + mutex_unlock(&xe->cache_resv.clos_mutex);
> > + goto free_bind_ops;
> > + }
> > + mutex_unlock(&xe->cache_resv.clos_mutex);
> > + }
> > +
> > }
> >
> > return 0;