[PATCH i-g-t 1/6] tests/intel/xe_pm: Update runtime pm conditions
Rodrigo Vivi
rodrigo.vivi at intel.com
Wed May 22 14:25:39 UTC 2024
On Wed, May 22, 2024 at 02:46:52PM +0200, Francois Dugast wrote:
> Hi,
>
> On Tue, May 21, 2024 at 11:16:35AM -0400, Rodrigo Vivi wrote:
> > Xe is no longer holding a runtime pm reference for the life
> > of a VM or exec_queue.
> >
> > Also, IGT sets the autosuspend delay to a minimal value, so we
> > cannot guarantee that rpm is still suspended after the execution
> > has finished.
> >
> > So, the usage (reference) count is not a reliable indicator.
> >
> > Hence, start using runtime_active_time as the indicator
> > that runtime_pm resumed upon our actions.
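
In practice the check pattern in the tests below becomes: sample
runtime_active_time while the device is runtime suspended, poke the device,
and assert that the counter grew. A minimal sketch (helper and names taken
from this patch, setup and error handling omitted):

	uint64_t active_time;

	/* sample the counter while the device is still runtime suspended */
	active_time = igt_pm_get_runtime_active_time(device.pci_xe);

	vm = xe_vm_create(device.fd_xe, 0, 0);

	/* any rpm resume triggered by the action makes the counter grow */
	igt_assert(igt_pm_get_runtime_active_time(device.pci_xe) > active_time);
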
> >
> > v2: Usage of runtime_active_time and inclusion of mmap tests.
> > v3: Add doc for the exported lib function (Kamil)
> > v4: Use uint64_t for active_time, which is a big number in
> > miliseconds. (Kamil)
>
> s/miliseconds/milliseconds/
>
> > v5: Use PRIu64 macro instead of %ld (Kamil)
> > Tell what unit of time is returned. (Kamil)
>
> It seems that improvements from v3, v4 and v5 would also apply to
> igt_pm_get_runtime_suspended_time().
New patch 6 in this series:
https://lore.kernel.org/all/20240521151640.280354-6-rodrigo.vivi@intel.com
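
Carrying the same uint64_t / milliseconds / PRIu64 treatment over to the
suspended-time helper would look roughly like this (sketch only, assuming it
mirrors igt_pm_get_runtime_active_time below and that <inttypes.h> is already
available in igt_pm.c; the actual patch 6 may differ):

	uint64_t igt_pm_get_runtime_suspended_time(struct pci_device *pci_dev)
	{
		char time_str[64];
		int time_fd;
		uint64_t time;

		time_fd = igt_pm_get_power_attr_fd_rdonly(pci_dev, "runtime_suspended_time");
		if (igt_pm_read_power_attr(time_fd, time_str, 64, false)) {
			/* runtime_suspended_time is reported in milliseconds */
			igt_assert(sscanf(time_str, "%" SCNu64, &time) > 0);

			return time;
		}

		return -1;
	}
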
>
> >
> > Cc: Kamil Konieczny <kamil.konieczny at linux.intel.com>
> > Cc: Badal Nilawar <badal.nilawar at intel.com>
> > Cc: Anshuman Gupta <anshuman.gupta at intel.com>
> > Cc: Francois Dugast <francois.dugast at intel.com>
> > Reviewed-by: Francois Dugast <francois.dugast at intel.com> #v1
> > Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > ---
> > lib/igt_pm.c | 25 +++++++++++++++
> > lib/igt_pm.h | 1 +
> > tests/intel/xe_pm.c | 77 +++++++++++++++++----------------------------
> > 3 files changed, 55 insertions(+), 48 deletions(-)
> >
> > diff --git a/lib/igt_pm.c b/lib/igt_pm.c
> > index fe7692960..baaa6657d 100644
> > --- a/lib/igt_pm.c
> > +++ b/lib/igt_pm.c
> > @@ -1412,6 +1412,31 @@ int igt_pm_get_runtime_suspended_time(struct pci_device *pci_dev)
> > return -1;
> > }
> >
> > +/**
> > + * igt_pm_get_runtime_active_time:
> > + * @pci_dev: PCI device struct
> > + *
> > + * Return: The total time in miliseconds that the device has been active.
>
> s/miliseconds/milliseconds/
>
> With that, feel free to keep my RB.
Thank you so much!
>
> Francois
>
> > + */
> > +uint64_t igt_pm_get_runtime_active_time(struct pci_device *pci_dev)
> > +{
> > +	char time_str[64];
> > +	int time_fd;
> > +	uint64_t time;
> > +
> > +	time_fd = igt_pm_get_power_attr_fd_rdonly(pci_dev, "runtime_active_time");
> > +	if (igt_pm_read_power_attr(time_fd, time_str, 64, false)) {
> > +		igt_assert(sscanf(time_str, "%ld", &time) > 0);
> > +
> > +		igt_debug("runtime active time for PCI '%04x:%02x:%02x.%01x' = %" PRIu64 "\n",
> > +			  pci_dev->domain, pci_dev->bus, pci_dev->dev, pci_dev->func, time);
> > +
> > +		return time;
> > +	}
> > +
> > +	return -1;
> > +}
> > +
> > /**
> > * igt_pm_get_runtime_usage:
> > * @pci_dev: pci device
> > diff --git a/lib/igt_pm.h b/lib/igt_pm.h
> > index 91ee05cd1..15e301533 100644
> > --- a/lib/igt_pm.h
> > +++ b/lib/igt_pm.h
> > @@ -94,6 +94,7 @@ void igt_pm_print_pci_card_runtime_status(void);
> > bool i915_is_slpc_enabled_gt(int drm_fd, int gt);
> > bool i915_is_slpc_enabled(int drm_fd);
> > int igt_pm_get_runtime_suspended_time(struct pci_device *pci_dev);
> > +uint64_t igt_pm_get_runtime_active_time(struct pci_device *pci_dev);
> > int igt_pm_get_runtime_usage(struct pci_device *pci_dev);
> > void igt_pm_ignore_slpc_efficient_freq(int i915, int gtfd, bool val);
> >
> > diff --git a/tests/intel/xe_pm.c b/tests/intel/xe_pm.c
> > index b4a8c4d15..d301ad031 100644
> > --- a/tests/intel/xe_pm.c
> > +++ b/tests/intel/xe_pm.c
> > @@ -185,34 +185,6 @@ static bool in_d3(device_t device, enum igt_acpi_d_state state)
> > return true;
> > }
> >
> > -static bool out_of_d3(device_t device, enum igt_acpi_d_state state)
> > -{
> > - uint16_t val;
> > -
> > - /* Runtime resume needs to be immediate action without any wait */
> > - if (runtime_usage_available(device.pci_xe) &&
> > - igt_pm_get_runtime_usage(device.pci_xe) <= 0)
> > - return false;
> > -
> > - if (igt_get_runtime_pm_status() != IGT_RUNTIME_PM_STATUS_ACTIVE)
> > - return false;
> > -
> > - switch (state) {
> > - case IGT_ACPI_D3Hot:
> > - igt_assert_eq(pci_device_cfg_read_u16(device.pci_xe,
> > - &val, 0xd4), 0);
> > - return (val & 0x3) == 0;
> > - case IGT_ACPI_D3Cold:
> > - return igt_pm_get_acpi_real_d_state(device.pci_root) ==
> > - IGT_ACPI_D0;
> > - default:
> > - igt_info("Invalid D3 State\n");
> > - igt_assert(0);
> > - }
> > -
> > - return true;
> > -}
> > -
> > static void close_fw_handle(int sig)
> > {
> > if (fw_handle < 0)
> > @@ -323,27 +295,28 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> > uint64_t pad;
> > uint32_t data;
> > } *data;
> > - int i, b, rpm_usage;
> > + int i, b;
> > + uint64_t active_time;
> > bool check_rpm = (d_state == IGT_ACPI_D3Hot ||
> > d_state == IGT_ACPI_D3Cold);
> >
> > igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> > igt_assert(n_execs > 0);
> >
> > - if (check_rpm)
> > + if (check_rpm) {
> > igt_assert(in_d3(device, d_state));
> > + active_time = igt_pm_get_runtime_active_time(device.pci_xe);
> > + }
> >
> > vm = xe_vm_create(device.fd_xe, 0, 0);
> >
> > if (check_rpm)
> > - igt_assert(out_of_d3(device, d_state));
> > + igt_assert(igt_pm_get_runtime_active_time(device.pci_xe) >
> > + active_time);
> >
> > bo_size = sizeof(*data) * n_execs;
> > bo_size = xe_bb_size(device.fd_xe, bo_size);
> >
> > - if (check_rpm && runtime_usage_available(device.pci_xe))
> > - rpm_usage = igt_pm_get_runtime_usage(device.pci_xe);
> > -
> > if (flags & USERPTR) {
> > data = aligned_alloc(xe_get_default_alignment(device.fd_xe), bo_size);
> > memset(data, 0, bo_size);
> > @@ -381,8 +354,10 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> > xe_vm_prefetch_async(device.fd_xe, vm, bind_exec_queues[0], 0, addr,
> > bo_size, sync, 1, 0);
> >
> > - if (check_rpm && runtime_usage_available(device.pci_xe))
> > - igt_assert(igt_pm_get_runtime_usage(device.pci_xe) > rpm_usage);
> > + if (check_rpm) {
> > + igt_assert(in_d3(device, d_state));
> > + active_time = igt_pm_get_runtime_active_time(device.pci_xe);
> > + }
> >
> > for (i = 0; i < n_execs; i++) {
> > uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> > @@ -426,9 +401,6 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> > igt_assert(syncobj_wait(device.fd_xe, &sync[0].handle, 1, INT64_MAX, 0,
> > NULL));
> >
> > - if (check_rpm && runtime_usage_available(device.pci_xe))
> > - rpm_usage = igt_pm_get_runtime_usage(device.pci_xe);
> > -
> > sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
> > if (n_vmas > 1)
> > xe_vm_unbind_all_async(device.fd_xe, vm, 0, bo, sync, 1);
> > @@ -456,15 +428,13 @@ NULL));
> > free(data);
> > }
> >
> > - if (check_rpm && runtime_usage_available(device.pci_xe))
> > - igt_assert(igt_pm_get_runtime_usage(device.pci_xe) < rpm_usage);
> > - if (check_rpm)
> > - igt_assert(out_of_d3(device, d_state));
> > -
> > xe_vm_destroy(device.fd_xe, vm);
> >
> > - if (check_rpm)
> > + if (check_rpm) {
> > + igt_assert(igt_pm_get_runtime_active_time(device.pci_xe) >
> > + active_time);
> > igt_assert(in_d3(device, d_state));
> > + }
> > }
> >
> > /**
> > @@ -558,10 +528,14 @@ static void test_mmap(device_t device, uint32_t placement, uint32_t flags)
> > size_t bo_size = 8192;
> > uint32_t *map = NULL;
> > uint32_t bo;
> > + uint64_t active_time;
> > int i;
> >
> > igt_require_f(placement, "Device doesn't support such memory region\n");
> >
> > + igt_assert(igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED));
> > + active_time = igt_pm_get_runtime_active_time(device.pci_xe);
> > +
> > bo_size = ALIGN(bo_size, xe_get_default_alignment(device.fd_xe));
> >
> > bo = xe_bo_create(device.fd_xe, 0, bo_size, placement, flags);
> > @@ -572,7 +546,8 @@ static void test_mmap(device_t device, uint32_t placement, uint32_t flags)
> > fw_handle = igt_debugfs_open(device.fd_xe, "forcewake_all", O_RDONLY);
> >
> > igt_assert(fw_handle >= 0);
> > - igt_assert(igt_get_runtime_pm_status() == IGT_RUNTIME_PM_STATUS_ACTIVE);
> > + igt_assert(igt_pm_get_runtime_active_time(device.pci_xe) >
> > + active_time);
> >
> > for (i = 0; i < bo_size / sizeof(*map); i++)
> > map[i] = MAGIC_1;
> > @@ -582,22 +557,28 @@ static void test_mmap(device_t device, uint32_t placement, uint32_t flags)
> >
> > /* Runtime suspend and validate the pattern and changed the pattern */
> > close(fw_handle);
> > + sleep(1);
> > +
> > igt_assert(igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED));
> > + active_time = igt_pm_get_runtime_active_time(device.pci_xe);
> >
> > for (i = 0; i < bo_size / sizeof(*map); i++)
> > igt_assert(map[i] == MAGIC_1);
> >
> > /* dgfx page-fault on mmaping should wake the gpu */
> > if (xe_has_vram(device.fd_xe) && flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)
> > - igt_assert(igt_get_runtime_pm_status() == IGT_RUNTIME_PM_STATUS_ACTIVE);
> > + igt_assert(igt_pm_get_runtime_active_time(device.pci_xe) >
> > + active_time);
> >
> > igt_assert(igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED));
> > + active_time = igt_pm_get_runtime_active_time(device.pci_xe);
> >
> > for (i = 0; i < bo_size / sizeof(*map); i++)
> > map[i] = MAGIC_2;
> >
> > if (xe_has_vram(device.fd_xe) && flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)
> > - igt_assert(igt_get_runtime_pm_status() == IGT_RUNTIME_PM_STATUS_ACTIVE);
> > + igt_assert(igt_pm_get_runtime_active_time(device.pci_xe) >
> > + active_time);
> >
> > igt_assert(igt_wait_for_pm_status(IGT_RUNTIME_PM_STATUS_SUSPENDED));
> >
> > --
> > 2.44.0
> >