[PATCH 4/9] drm/i915/dmc: Extract dmc_load_program()

Ville Syrjälä ville.syrjala at linux.intel.com
Fri Jun 13 14:18:27 UTC 2025


On Thu, Jun 12, 2025 at 08:16:51PM +0000, Shankar, Uma wrote:
> 
> 
> > -----Original Message-----
> > From: Intel-gfx <intel-gfx-bounces at lists.freedesktop.org> On Behalf Of Ville
> > Syrjala
> > Sent: Wednesday, June 11, 2025 9:23 PM
> > To: intel-gfx at lists.freedesktop.org
> > Cc: intel-xe at lists.freedesktop.org
> > Subject: [PATCH 4/9] drm/i915/dmc: Extract dmc_load_program()
> > 
> > From: Ville Syrjälä <ville.syrjala at linux.intel.com>
> > 
> > We'll be needing to reload the program for individual DMCs.
> > To make that possible pull the code to load the program for a single DMC into a
> > new function.
> > 
> > This does change the order of things during init/resume a bit; previously we loaded
> > the program RAM for all DMCs first, and then loaded the MMIO registers for all
> > DMCs. Now those operations will be interleaved between different DMCs.
> 
> Haven't found any documentation mandating this sequence, so should be ok.

I was also pondering about the safety of the whole DMC reloading
process. I think it still has lots of potential races:
- we disable the event handlers first, and then reload the program,
  but there is no explicit sync in between to guarantee the DMC isn't
  still executing one of the old handlers. Not sure what such a sync
  would be though since there are various triggers. Maybe a vblank wait
  or two (if the pipe is active) would at least guarantee all the frame
  timing related triggers would be done.
- we load the firmware mmio list in the order specified by the firmware,
  which always seems to have the EVT_CTL before the EVT_HTP. That means
  we are enabling the events before the HTP is in place, potentially
  causing the DMC to start executing from some random location. We could
  eg. do two loops over the mmio list, first loop would do all the !EVT_CTL
  registers, and a second loop would do just the EVT_CTL registers.

Though I'm not sure the GOP even loads any DMC firmware, so perhaps none
of this really matters for normal use cases.

> 
> Reviewed-by: Uma Shankar <uma.shankar at intel.com>
> 
> > Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
> > ---
> >  drivers/gpu/drm/i915/display/intel_dmc.c | 78 +++++++++++++-----------
> >  1 file changed, 42 insertions(+), 36 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c
> > b/drivers/gpu/drm/i915/display/intel_dmc.c
> > index 5a43298cd0e7..331db28039db 100644
> > --- a/drivers/gpu/drm/i915/display/intel_dmc.c
> > +++ b/drivers/gpu/drm/i915/display/intel_dmc.c
> > @@ -432,25 +432,22 @@ static void disable_event_handler(struct intel_display
> > *display,
> >  	intel_de_write(display, htp_reg, 0);
> >  }
> > 
> > -static void disable_all_event_handlers(struct intel_display *display)
> > +static void disable_all_event_handlers(struct intel_display *display,
> > +				       enum intel_dmc_id dmc_id)
> >  {
> > -	enum intel_dmc_id dmc_id;
> > +	int handler;
> > 
> >  	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
> >  	if (DISPLAY_VER(display) < 12)
> >  		return;
> > 
> > -	for_each_dmc_id(dmc_id) {
> > -		int handler;
> > +	if (!has_dmc_id_fw(display, dmc_id))
> > +		return;
> > 
> > -		if (!has_dmc_id_fw(display, dmc_id))
> > -			continue;
> > -
> > -		for (handler = 0; handler <
> > DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
> > -			disable_event_handler(display,
> > -					      DMC_EVT_CTL(display, dmc_id,
> > handler),
> > -					      DMC_EVT_HTP(display, dmc_id,
> > handler));
> > -	}
> > +	for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12;
> > handler++)
> > +		disable_event_handler(display,
> > +				      DMC_EVT_CTL(display, dmc_id, handler),
> > +				      DMC_EVT_HTP(display, dmc_id, handler));
> >  }
> > 
> >  static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool
> > enable) @@ -578,6 +575,30 @@ static u32 dmc_mmiodata(struct intel_display
> > *display,
> >  		return dmc->dmc_info[dmc_id].mmiodata[i];
> >  }
> > 
> > +static void dmc_load_program(struct intel_display *display,
> > +			     enum intel_dmc_id dmc_id)
> > +{
> > +	struct intel_dmc *dmc = display_to_dmc(display);
> > +	int i;
> > +
> > +	disable_all_event_handlers(display, dmc_id);
> > +
> > +	preempt_disable();
> > +
> > +	for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
> > +		intel_de_write_fw(display,
> > +				  DMC_PROGRAM(dmc-
> > >dmc_info[dmc_id].start_mmioaddr, i),
> > +				  dmc->dmc_info[dmc_id].payload[i]);
> > +	}
> > +
> > +	preempt_enable();
> > +
> > +	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
> > +		intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
> > +			       dmc_mmiodata(display, dmc, dmc_id, i));
> > +	}
> > +}
> > +
> >  void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe)  {
> >  	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); @@ -685,37
> > +706,17 @@ void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct
> > intel_display  void intel_dmc_load_program(struct intel_display *display)  {
> >  	struct i915_power_domains *power_domains = &display->power.domains;
> > -	struct intel_dmc *dmc = display_to_dmc(display);
> >  	enum intel_dmc_id dmc_id;
> > -	u32 i;
> > 
> >  	if (!intel_dmc_has_payload(display))
> >  		return;
> > 
> > -	pipedmc_clock_gating_wa(display, true);
> > -
> > -	disable_all_event_handlers(display);
> > -
> >  	assert_display_rpm_held(display);
> > 
> > -	preempt_disable();
> > +	pipedmc_clock_gating_wa(display, true);
> > 
> > -	for_each_dmc_id(dmc_id) {
> > -		for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
> > -			intel_de_write_fw(display,
> > -					  DMC_PROGRAM(dmc-
> > >dmc_info[dmc_id].start_mmioaddr, i),
> > -					  dmc->dmc_info[dmc_id].payload[i]);
> > -		}
> > -	}
> > -
> > -	preempt_enable();
> > -
> > -	for_each_dmc_id(dmc_id) {
> > -		for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
> > -			intel_de_write(display, dmc-
> > >dmc_info[dmc_id].mmioaddr[i],
> > -				       dmc_mmiodata(display, dmc, dmc_id, i));
> > -		}
> > -	}
> > +	for_each_dmc_id(dmc_id)
> > +		dmc_load_program(display, dmc_id);
> > 
> >  	power_domains->dc_state = 0;
> > 
> > @@ -733,11 +734,16 @@ void intel_dmc_load_program(struct intel_display
> > *display)
> >   */
> >  void intel_dmc_disable_program(struct intel_display *display)  {
> > +	enum intel_dmc_id dmc_id;
> > +
> >  	if (!intel_dmc_has_payload(display))
> >  		return;
> > 
> >  	pipedmc_clock_gating_wa(display, true);
> > -	disable_all_event_handlers(display);
> > +
> > +	for_each_dmc_id(dmc_id)
> > +		disable_all_event_handlers(display, dmc_id);
> > +
> >  	pipedmc_clock_gating_wa(display, false);  }
> > 
> > --
> > 2.49.0
> 

-- 
Ville Syrjälä
Intel


More information about the Intel-gfx mailing list