[PATCH] drm/xe/mocs: Add debugfs node to dump mocs
Lucas De Marchi
lucas.demarchi at intel.com
Fri Mar 22 16:38:33 UTC 2024
On Thu, Mar 21, 2024 at 06:37:39PM +0530, Janga Rahul Kumar wrote:
>This is useful to check mocs configuration. Tests/Tools can use
>this debugfs entry to get mocs info.
>
>Cc: Matt Roper <matthew.d.roper at intel.com>
>Signed-off-by: Janga Rahul Kumar <janga.rahul.kumar at intel.com>
>---
> drivers/gpu/drm/xe/xe_gt_debugfs.c | 13 +++++++++
> drivers/gpu/drm/xe/xe_mocs.c | 45 ++++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_mocs.h | 8 ++++++
> 3 files changed, 66 insertions(+)
>
>diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
>index 6b4dc2927727..918be3b90ba9 100644
>--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
>+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
>@@ -17,6 +17,7 @@
> #include "xe_hw_engine.h"
> #include "xe_lrc.h"
> #include "xe_macros.h"
>+#include "xe_mocs.h"
> #include "xe_pat.h"
> #include "xe_pm.h"
> #include "xe_reg_sr.h"
>@@ -233,6 +234,17 @@ static int vecs_default_lrc(struct seq_file *m, void *data)
> return 0;
> }
>
>+static int mocs(struct seq_file *m, void *data)
>+{
>+ struct xe_gt *gt = node_to_gt(m->private);
>+ struct drm_printer p = drm_seq_file_printer(m);
>+
>+ xe_pm_runtime_get(gt_to_xe(gt));
>+ xe_mocs_dump(gt, &p);
>+ xe_pm_runtime_put(gt_to_xe(gt));
>+
>+ return 0;
>+}
> static const struct drm_info_list debugfs_list[] = {
> {"hw_engines", hw_engines, 0},
> {"force_reset", force_reset, 0},
>@@ -248,6 +260,7 @@ static const struct drm_info_list debugfs_list[] = {
> {"default_lrc_bcs", bcs_default_lrc},
> {"default_lrc_vcs", vcs_default_lrc},
> {"default_lrc_vecs", vecs_default_lrc},
>+ {"mocs", mocs, 0},
Since it's very similar to PAT, please add this entry next to that one.
> };
>
> void xe_gt_debugfs_register(struct xe_gt *gt)
>diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
>index bff659d20062..a0416100912b 100644
>--- a/drivers/gpu/drm/xe/xe_mocs.c
>+++ b/drivers/gpu/drm/xe/xe_mocs.c
>@@ -574,6 +574,51 @@ void xe_mocs_init(struct xe_gt *gt)
> init_l3cc_table(gt, &table);
> }
>
>+void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
>+{
>+ struct xe_mocs_info table;
>+ unsigned int flags;
>+ u32 ret, reg_val;
>+ unsigned int i;
>+ struct xe_device *xe = gt_to_xe(gt);
>+
>+ flags = get_mocs_settings(xe, &table);
>+ xe_device_mem_access_get(xe);
>+ ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
>+
>+ if (ret)
>+ goto err_fw;
>+
>+ if (flags & HAS_GLOBAL_MOCS) {
>+ drm_printf(p, "Global mocs registers configuration:\n");
>+ for (i = 0; i < table.n_entries ; i++) {
Watch out for coding style here and in a few places below (e.g. the stray space before ';' in the for-loop condition).
>+ if (regs_are_mcr(gt))
>+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
>+ else
>+ reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
>+
>+ drm_printf(p, "GLOB_MOCS[%d] 0x%x 0x%x\n", i, XELP_GLOBAL_MOCS(i).addr, reg_val);
Let's use the same output style as xe_pat_dump().

Overall, this looks good to me.

Lucas De Marchi
>+ }
>+ }
>+
>+ if (flags & HAS_LNCF_MOCS) {
>+ for (i = 0; i < (table.n_entries + 1)/2 ; i++) {
>+ if (regs_are_mcr(gt))
>+ reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
>+ else
>+ reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
>+
>+ drm_printf(p, "LNCFCMOCS[%d] 0x%x 0x%x\n", i, XELP_LNCFCMOCS(i).addr, reg_val);
>+ }
>+ }
>+
>+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
>+
>+err_fw:
>+ xe_assert(xe, !ret);
>+ xe_device_mem_access_put(xe);
>+}
>+
> #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
> #include "tests/xe_mocs.c"
> #endif
>diff --git a/drivers/gpu/drm/xe/xe_mocs.h b/drivers/gpu/drm/xe/xe_mocs.h
>index 053754c5a94e..d6fa4485a6e9 100644
>--- a/drivers/gpu/drm/xe/xe_mocs.h
>+++ b/drivers/gpu/drm/xe/xe_mocs.h
>@@ -10,8 +10,16 @@
>
> struct xe_exec_queue;
> struct xe_gt;
>+struct drm_printer;
>
> void xe_mocs_init_early(struct xe_gt *gt);
> void xe_mocs_init(struct xe_gt *gt);
>
>+/**
>+ * xe_mocs_dump - Dump mocs table
>+ * @gt: GT structure
>+ * @p: Printer to dump info to
>+ */
>+void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p);
>+
> #endif
>--
>2.25.1
>
More information about the Intel-xe
mailing list