[PATCH i-g-t 1/2] tests/intel/xe_evict: Reduce allocations to maximum working set

Thomas Hellström thomas.hellstrom at linux.intel.com
Thu Jun 27 07:28:23 UTC 2024


On Thu, 2024-06-27 at 06:29 +0000, Matthew Brost wrote:
> On Wed, Jun 26, 2024 at 02:38:32PM +0200, Thomas Hellström wrote:
> > Current xe kmd allows for a maximum working set of VRAM plus
> > half of system memory, or if the working set is allowed only in
> > VRAM, the working set is limited to VRAM.
> > 
> > Some subtests attempt to exceed that. Detect when that happens
> > and limit the working set accordingly.
> > 
> > v2:
> > - The determination for which flags system bos are allowed in the
> >   working set was incorrect. Fix. (Zbigniew Kempczyński)
> > - Fix a typo.
> > - Add an assert that vram_size is indeed > 0.
> >   (Zbigniew Kempczyński, Thomas)
> > - Add asserts and make sure that the bo is bound to the same
> >   vm the exec_queue is using.
> > - Increase the allowed set size for the multi-vm test.
> > 
> > Cc: Matthew Brost <matthew.brost at intel.com>
> > Cc: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
> > Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
> > Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> > ---
> >  tests/intel/xe_evict.c | 89 ++++++++++++++++++++++++++++++++++++------
> >  1 file changed, 76 insertions(+), 13 deletions(-)
> > 
> > diff --git a/tests/intel/xe_evict.c b/tests/intel/xe_evict.c
> > index eebdbc84b..5691ad021 100644
> > --- a/tests/intel/xe_evict.c
> > +++ b/tests/intel/xe_evict.c
> > @@ -97,6 +97,7 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >                          uint32_t _vm = (flags & EXTERNAL_OBJ) &&
> >                                  i < n_execs / 8 ? 0 : vm;
> >  
> > +			igt_assert((e & 1) == (i & 1));
> >  			if (flags & MULTI_VM) {
> >  				__bo = bo[i] = xe_bo_create(fd, 0,
> >  							    bo_size,
> > @@ -115,6 +116,7 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  							    DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> >  			}
> >  		} else {
> > +			igt_assert((e & 1) == ((i % (n_execs / 2)) & 1));
> >  			__bo = bo[i % (n_execs / 2)];
> >  		}
> >  		if (i)
> > @@ -273,6 +275,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >                          uint32_t _vm = (flags & EXTERNAL_OBJ) &&
> >                                  i < n_execs / 8 ? 0 : vm;
> >  
> > +			igt_assert((e & 1) == (i & 1));
> >  			if (flags & MULTI_VM) {
> >  				__bo = bo[i] = xe_bo_create(fd, 0,
> >  							    bo_size,
> > @@ -291,6 +294,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  							    DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
> >  			}
> >  		} else {
> > +			igt_assert((e & 1) == ((i % (n_execs / 2)) & 1));
> >  			__bo = bo[i % (n_execs / 2)];
> >  		}
> >  		if (i)
> > @@ -458,6 +462,46 @@ static uint64_t calc_bo_size(uint64_t vram_size, int mul, int div)
> >  		return (ALIGN(vram_size, SZ_256M)  * mul) / div; /* small-bar */
> >  }
> >  
> > +static unsigned int working_set(uint64_t vram_size, uint64_t system_size,
> > +				uint64_t bo_size, unsigned int num_threads,
> > +				unsigned int flags)
> > +{
> > +	uint64_t set_size;
> > +	uint64_t total_size;
> > +
> > +	igt_assert(vram_size > 0);
> > +
> > +	set_size = (vram_size - 1) / bo_size;
> > +
> > +	/*
> > +	 * Working set resides also in system?
> > +	 * Currently system graphics memory is limited to 50% of total.
> > +	 */
> > +	if (!(flags & (THREADED | MULTI_VM)))
> > +		set_size += (system_size / 2) / bo_size;
> > +
> > +	/* Set sizes are per vm. In the multi-vm case we use 2 vms. */
> > +	if (flags & MULTI_VM)
> > +		set_size *= 2;
> > +
> > +	/* All bos must fit in memory, assuming no swapping */
> > +	total_size = ((vram_size - 1) / bo_size + system_size / bo_size) /
> 
> Should this not be '(system_size / 2) / bo_size'?

No. Bos evicted from VRAM may be swapped out to shmem, and swapped-out
bos are allowed to fill all of system memory, not just the 50% the kmd
allows for resident system bos, so the cap deliberately counts the full
system_size.

However, the full system_size is a bit aggressive if there is no
swap-space...
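To put numbers on the accounting, here's a minimal standalone sketch of
the same bound with made-up sizes (8 GiB VRAM, 16 GiB system, 64 MiB
bos). The names mirror working_set() in the patch, but the values and
the program itself are purely illustrative:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_size = 8ull << 30;	/* 8 GiB VRAM */
	uint64_t system_size = 16ull << 30;	/* 16 GiB system RAM */
	uint64_t bo_size = 64ull << 20;		/* 64 MiB per bo */
	uint64_t set_size, total_size;

	/* Per-vm working set: what fits in VRAM... */
	set_size = (vram_size - 1) / bo_size;		/* 127 */

	/* ...plus half of system memory for resident system bos. */
	set_size += (system_size / 2) / bo_size;	/* 127 + 128 = 255 */

	/*
	 * Hard cap: all bos must fit somewhere at once. Bos evicted
	 * from VRAM may be swapped out to shmem, which may fill all of
	 * system memory, hence the full system_size and no / 2 here.
	 */
	total_size = (vram_size - 1) / bo_size + system_size / bo_size;

	if (set_size > total_size)	/* 255 <= 383, so no clamp here */
		set_size = total_size;

	printf("max working set: %" PRIu64 " bos\n", set_size);
	return 0;
}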

/Thomas



> 
> Matt
> 
> > +		num_threads;
> > +
> > +	if (set_size > total_size)
> > +		set_size = total_size;
> > +
> > +	/* bos are only created on half of the execs. */
> > +	set_size *= 2;
> > +
> > +	/*
> > +	 * Align down to ensure the vm the bo is bound to matches the vm
> > +	 * used by the exec_queue, fulfilling the asserts in the
> > +	 * tests.
> > +	 */
> > +	return ALIGN_DOWN(set_size, 4);
> > +}
> > +
> >  /**
> >   * SUBTEST: evict-%s
> >   * Description:  %arg[1] evict test.
> > @@ -748,6 +792,7 @@ igt_main
> >  		{ NULL },
> >  	};
> >  	uint64_t vram_size;
> > +	uint64_t system_size;
> >  	int fd;
> >  
> >  	igt_fixture {
> > @@ -755,14 +800,16 @@ igt_main
> >  		igt_require(xe_has_vram(fd));
> >  		vram_size = xe_visible_vram_size(fd, 0);
> >  		igt_assert(vram_size);
> > +		system_size = igt_get_avail_ram_mb() << 20;
> >  
> >  		/* Test requires SRAM to about as big as VRAM. For example, small-cm creates
> >  		 * (448 / 2) BOs with a size (1 / 128) of the total VRAM size. For
> >  		 * simplicity ensure the SRAM size >= VRAM before running this test.
> >  		 */
> > -		igt_skip_on_f(igt_get_avail_ram_mb() < (vram_size >> 20),
> > -			      "System memory %lu MiB is less than local memory %lu MiB\n",
> > -			      igt_get_avail_ram_mb(), vram_size >> 20);
> > +		igt_skip_on_f(system_size < vram_size,
> > +			      "System memory %llu MiB is less than local memory %llu MiB\n",
> > +			      (unsigned long long)system_size >> 20,
> > +			      (unsigned long long)vram_size >> 20);
> >  
> >  		xe_for_each_engine(fd, hwe)
> >  			if (hwe->engine_class != DRM_XE_ENGINE_CLASS_COPY)
> > @@ -770,25 +817,41 @@ igt_main
> >  	}
> >  
> >  	for (const struct section *s = sections; s->name; s++) {
> > -		igt_subtest_f("evict-%s", s->name)
> > -			test_evict(fd, hwe, s->n_exec_queues, s-
> > >n_execs,
> > -				   calc_bo_size(vram_size, s->mul,
> > s->div),
> > +		igt_subtest_f("evict-%s", s->name) {
> > +			uint64_t bo_size = calc_bo_size(vram_size,
> > s->mul, s->div);
> > +			int ws = working_set(vram_size,
> > system_size, bo_size,
> > +					     1, s->flags);
> > +
> > +			igt_debug("Max working set %d n_execs
> > %d\n", ws, s->n_execs);
> > +			test_evict(fd, hwe, s->n_exec_queues,
> > +				   min(ws, s->n_execs), bo_size,
> >  				   s->flags, NULL);
> > +		}
> >  	}
> >  
> >  	for (const struct section_cm *s = sections_cm; s->name; s++) {
> > -		igt_subtest_f("evict-%s", s->name)
> > -			test_evict_cm(fd, hwe, s->n_exec_queues, s->n_execs,
> > -				      calc_bo_size(vram_size, s->mul, s->div),
> > +		igt_subtest_f("evict-%s", s->name) {
> > +			uint64_t bo_size = calc_bo_size(vram_size, s->mul, s->div);
> > +			int ws = working_set(vram_size, system_size, bo_size,
> > +					     1, s->flags);
> > +
> > +			igt_debug("Max working set %d n_execs %d\n", ws, s->n_execs);
> > +			test_evict_cm(fd, hwe, s->n_exec_queues,
> > +				      min(ws, s->n_execs), bo_size,
> >  				      s->flags, NULL);
> > +		}
> >  	}
> >  
> >  	for (const struct section_threads *s = sections_threads; s->name; s++) {
> > -		igt_subtest_f("evict-%s", s->name)
> > +		igt_subtest_f("evict-%s", s->name) {
> > +			uint64_t bo_size = calc_bo_size(vram_size, s->mul, s->div);
> > +			int ws = working_set(vram_size, system_size, bo_size,
> > +					     s->n_threads, s->flags);
> > +
> > +			igt_debug("Max working set %d n_execs %d\n", ws, s->n_execs);
> >  			threads(fd, hwe, s->n_threads, s->n_exec_queues,
> > -				 s->n_execs,
> > -				 calc_bo_size(vram_size, s->mul, s->div),
> > -				 s->flags);
> > +				min(ws, s->n_execs), bo_size, s->flags);
> > +		}
> >  	}
> >  
> >  	igt_fixture
> > -- 
> > 2.44.0
> > 


