[Intel-gfx] [PATCH v6 03/13] drm/i915/guc: Add XE_LP steered register lists support
Alan Previn
alan.previn.teres.alexis at intel.com
Sat Feb 26 05:55:16 UTC 2022
Add the ability for runtime allocation and freeing of
steered register list extensions that depend on the
detected HW config fuses.
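
For example, with the two steered registers currently tracked for
XE_LP (GEN7_SAMPLER_INSTDONE and GEN7_ROW_INSTDONE), a hypothetical
part with three enabled dual-sub-slices would get a single runtime
allocated extension group holding 2 * 3 = 6 register descriptors,
each pre-programmed with its slice/sub-slice steering flags, followed
by a zeroed sentinel group that terminates the table:

    extlists[0] = { .extlist = <6 x struct __guc_mmio_reg_descr>,
                    .num_regs = 6,
                    .owner/.type/.engine inherited from the static
                    render-class list }
    extlists[1] = { } /* end marker */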
Signed-off-by: Alan Previn <alan.previn.teres.alexis at intel.com>
---
drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h | 9 +
.../gpu/drm/i915/gt/uc/intel_guc_capture.c | 175 ++++++++++++++++--
2 files changed, 173 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index f3e305bc05bb..8478e416dead 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -50,6 +50,7 @@ struct __guc_mmio_reg_descr_group {
u32 owner; /* see enum guc_capture_owner */
u32 type; /* see enum guc_capture_type */
u32 engine; /* as per MAX_ENGINE_CLASS */
+ struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */
};
/**
@@ -77,6 +78,14 @@ struct __guc_state_capture_priv {
*/
const struct __guc_mmio_reg_descr_group *reglists;
+ /**
+ * @extlists: allocated table of steered register lists used for error-capture state.
+ *
+ * NOTE: steered registers have multiple instances depending on the HW configuration
+ * (slices or dual-sub-slices) and thus depend on the HW fuses discovered at startup.
+ */
+ struct __guc_mmio_reg_descr_group *extlists;
+
/**
* @ads_cache: cached register lists that is ADS format ready
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 40c715aa5b6b..01c0c5faaadb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -132,6 +132,7 @@ static const struct __guc_mmio_reg_descr empty_regs_list[] = {
TO_GCAP_DEF_OWNER(regsowner), \
TO_GCAP_DEF_TYPE(regstype), \
class, \
+ NULL, \
}
/* List of lists */
@@ -149,28 +150,33 @@ static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
};
static const struct __guc_mmio_reg_descr_group *
-guc_capture_get_device_reglist(struct intel_guc *guc)
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int i;
- if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915) ||
- IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
- return xe_lpd_lists;
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].list; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
}
return NULL;
}
-static const struct __guc_mmio_reg_descr_group *
-guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
- u32 owner, u32 type, u32 id)
+static struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
{
int i;
if (!reglists)
return NULL;
- for (i = 0; reglists[i].list; ++i) {
+ for (i = 0; reglists[i].extlist; ++i) {
if (reglists[i].owner == owner && reglists[i].type == type &&
(reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
return &reglists[i];
@@ -179,6 +185,127 @@ guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
return NULL;
}
+static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
+{
+ int i = 0;
+
+ if (!reglists)
+ return;
+
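+ /* the table is terminated by a zeroed end-marker group */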
+ while (reglists[i].extlist)
+ kfree(reglists[i++].extlist);
+}
+
+struct __ext_steer_reg {
+ const char *name;
+ i915_reg_t reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+ {"GEN7_SAMPLER_INSTDONE", GEN7_SAMPLER_INSTDONE},
+ {"GEN7_ROW_INSTDONE", GEN7_ROW_INSTDONE}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+ const struct __ext_steer_reg *extlist,
+ int slice_id, int subslice_id)
+{
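+ /* encode the steering target: group selects the slice, instance the sub-slice */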
+ ext->reg = extlist->reg;
+ ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->regname = extlist->name;
+}
+
+static int
+__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
+ const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+ struct __guc_mmio_reg_descr *list;
+
+ list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ newlist->extlist = list;
+ newlist->num_regs = num_regs;
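+ /* inherit the identity of the static root list this extends */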
+ newlist->owner = rootlist->owner;
+ newlist->engine = rootlist->engine;
+ newlist->type = rootlist->type;
+
+ return 0;
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int slice, subslice, i, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+ struct sseu_dev_info *sseu;
+
+ /* In XE_LPD we only have steered registers for the render-class */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture.priv->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+
+ sseu = &gt->info.sseu;
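+ /* one copy of each steered register for every enabled slice/sub-slice instance */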
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
+ num_tot_regs += num_steer_regs;
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
+ for (i = 0; i < num_steer_regs; ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+
+ guc->capture.priv->extlists = extlists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915) ||
+ IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+ * these at init time based on the HW config and add them as an
+ * extension list at the end of the pre-populated render list.
+ */
+ guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists);
+ return xe_lpd_lists;
+ }
+
+ drm_warn(&i915->drm, "No GuC-capture register lists\n");
+
+ return NULL;
+}
+
static const char *
__stringify_owner(u32 owner)
{
@@ -249,10 +376,12 @@ static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
{
- u32 i = 0;
+ u32 i = 0, j = 0;
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
const struct __guc_mmio_reg_descr_group *reglists = guc->capture.priv->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture.priv->extlists;
const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
if (!reglists)
return -ENODEV;
@@ -271,6 +400,17 @@ guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
ptr[i].mask = match->list[i].mask;
}
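+ /* append the steered-register extension entries, if any, after the static list */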
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
+ if (matchext) {
+ for (i = match->num_regs, j = 0; i < num_entries &&
+ i < (match->num_regs + matchext->num_regs) &&
+ j < matchext->num_regs; ++i, ++j) {
+ ptr[i].offset = matchext->extlist[j].reg.reg;
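+ /* placeholder value, GuC is expected to overwrite it at capture time */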
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = matchext->extlist[j].flags;
+ ptr[i].mask = matchext->extlist[j].mask;
+ }
+ }
if (i < num_entries)
drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out %d.\n",
(int)i, (int)num_entries);
@@ -282,12 +422,20 @@ static int
guc_cap_list_num_regs(struct __guc_state_capture_priv *gc, u32 owner, u32 type, u32 classid)
{
const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int num_regs;
match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
if (!match)
return 0;
- return match->num_regs;
+ num_regs = match->num_regs;
+
+ matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
+ if (matchext)
+ num_regs += matchext->num_regs;
+
+ return num_regs;
}
int
@@ -397,6 +545,11 @@ void intel_guc_capture_destroy(struct intel_guc *guc)
guc_capture_free_ads_cache(guc->capture.priv);
+ if (guc->capture.priv->extlists) {
+ guc_capture_free_extlists(guc->capture.priv->extlists);
+ kfree(guc->capture.priv->extlists);
+ }
+
kfree(guc->capture.priv);
guc->capture.priv = NULL;
}
--
2.25.1